| text (stringlengths 12-1.05M) | repo_name (stringlengths 5-86) | path (stringlengths 4-191) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 12-1.05M) | keyword (listlengths 1-23) | text_hash (stringlengths 64) |
|---|---|---|---|---|---|---|---|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
"""
This module implements various transmuter classes.
Transmuters are essentially classes that generate TransformedStructures from
various data sources. They enable the high-throughput generation of new
structures and input files.
It also includes the helper function batch_write_vasp_input, which generates
an entire directory of VASP input files for running.
"""
from six.moves import filter, map
__author__ = "Shyue Ping Ong, Will Richards"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 4, 2012"
import os
import re
import warnings
from multiprocessing import Pool
from pymatgen.alchemy.materials import TransformedStructure
from pymatgen.io.vasp.sets import MPRelaxSet
class StandardTransmuter(object):
"""
An example of a Transmuter object, which performs a sequence of
transformations on many structures to generate TransformedStructures.
.. attribute:: transformed_structures
List of all transformed structures.
"""
def __init__(self, transformed_structures, transformations=None,
extend_collection=0, ncores=None):
"""
Initializes a transmuter from an initial list of
:class:`pymatgen.alchemy.materials.TransformedStructure`.
Args:
transformed_structures ([TransformedStructure]): Input transformed
structures
transformations ([Transformations]): New transformations to be
applied to all structures.
extend_collection (int): Whether to use more than one output
structure from one-to-many transformations. extend_collection
can be an int, which determines the maximum branching for each
transformation.
ncores (int): Number of cores to use for applying transformations.
Uses multiprocessing.Pool. Default is None, which implies
serial.
"""
self.transformed_structures = transformed_structures
self.ncores = ncores
if transformations is not None:
for trans in transformations:
self.append_transformation(trans,
extend_collection=extend_collection)
def __getitem__(self, index):
return self.transformed_structures[index]
def __getattr__(self, name):
return [getattr(x, name) for x in self.transformed_structures]
def undo_last_change(self):
"""
Undo the last transformation in the TransformedStructure.
Raises:
IndexError if already at the oldest change.
"""
for x in self.transformed_structures:
x.undo_last_change()
def redo_next_change(self):
"""
Redo the last undone transformation in the TransformedStructure.
Raises:
IndexError if already at the latest change.
"""
for x in self.transformed_structures:
x.redo_next_change()
def __len__(self):
return len(self.transformed_structures)
def append_transformation(self, transformation, extend_collection=False,
clear_redo=True):
"""
Appends a transformation to all TransformedStructures.
Args:
transformation: Transformation to append
extend_collection: Whether to use more than one output structure
from one-to-many transformations. extend_collection can be a
number, which determines the maximum branching for each
transformation.
clear_redo (bool): Whether to clear the redo list. By default,
this is True, meaning any appends clears the history of
undoing. However, when using append_transformation to do a
redo, the redo list should not be cleared to allow multiple
redos.
Returns:
List of booleans corresponding to the initial transformed structures;
each boolean describes whether the transformation altered the
structure.
"""
if self.ncores and transformation.use_multiprocessing:
p = Pool(self.ncores)
#need to condense arguments into single tuple to use map
z = map(
lambda x: (x, transformation, extend_collection, clear_redo),
self.transformed_structures)
new_tstructs = p.map(_apply_transformation, z, 1)
self.transformed_structures = []
for ts in new_tstructs:
self.transformed_structures.extend(ts)
else:
new_structures = []
for x in self.transformed_structures:
new = x.append_transformation(transformation,
extend_collection,
clear_redo=clear_redo)
if new is not None:
new_structures.extend(new)
self.transformed_structures.extend(new_structures)
def extend_transformations(self, transformations):
"""
Applies a sequence of transformations to all TransformedStructures.
Args:
transformations: Sequence of Transformations
"""
for t in transformations:
self.append_transformation(t)
def apply_filter(self, structure_filter):
"""
Applies a structure_filter to the list of TransformedStructures
in the transmuter.
Args:
structure_filter: StructureFilter to apply.
"""
def test_transformed_structure(ts):
return structure_filter.test(ts.final_structure)
self.transformed_structures = list(filter(test_transformed_structure,
self.transformed_structures))
for ts in self.transformed_structures:
ts.append_filter(structure_filter)
def write_vasp_input(self, **kwargs):
"""
Batch write vasp input for a sequence of transformed structures to
output_dir, following the format output_dir/{formula}_{number}.
Args:
vasp_input_set: pymatgen.io.vasp.sets.VaspInputSet used to create
vasp input files from structures
output_dir: Directory to output files
create_directory (bool): Create the directory if not present.
Defaults to True.
subfolder: Callable to create subdirectory name from
transformed_structure. e.g.,
lambda x: x.other_parameters["tags"][0] to use the first tag.
include_cif (bool): Whether to output a CIF as well. CIF files
are generally better supported in visualization programs.
"""
batch_write_vasp_input(self.transformed_structures, **kwargs)
def set_parameter(self, key, value):
"""
Add parameters to the transmuter. Additional parameters are stored in
the as_dict() output.
Args:
key: The key for the parameter.
value: The value for the parameter.
"""
for x in self.transformed_structures:
x.other_parameters[key] = value
def add_tags(self, tags):
"""
Add tags for the structures generated by the transmuter.
Args:
tags: A sequence of tags. Note that this should be a sequence of
strings, e.g., ["My awesome structures", "Project X"].
"""
self.set_parameter("tags", tags)
def __str__(self):
output = ["Current structures", "------------"]
for x in self.transformed_structures:
output.append(str(x.final_structure))
return "\n".join(output)
def append_transformed_structures(self, tstructs_or_transmuter):
"""
Overloaded to accept either a list of transformed structures or a
transmuter, in which case it appends the second transmuter's
structures.
Args:
tstructs_or_transmuter: A list of transformed structures or a
transmuter.
"""
if isinstance(tstructs_or_transmuter, self.__class__):
self.transformed_structures.extend(tstructs_or_transmuter
.transformed_structures)
else:
for ts in tstructs_or_transmuter:
assert isinstance(ts, TransformedStructure)
self.transformed_structures.extend(tstructs_or_transmuter)
@staticmethod
def from_structures(structures, transformations=None, extend_collection=0):
"""
Alternative constructor from structures rather than
TransformedStructures.
Args:
structures: Sequence of structures
transformations: New transformations to be applied to all
structures
extend_collection: Whether to use more than one output structure
from one-to-many transformations. extend_collection can be a
number, which determines the maximum branching for each
transformation.
Returns:
StandardTransmuter
"""
tstruct = [TransformedStructure(s, []) for s in structures]
return StandardTransmuter(tstruct, transformations, extend_collection)
class CifTransmuter(StandardTransmuter):
"""
Generates a Transmuter from a cif string, possibly containing multiple
structures.
"""
def __init__(self, cif_string, transformations=None, primitive=True,
extend_collection=False):
"""
Generates a Transmuter from a cif string, possibly
containing multiple structures.
Args:
cif_string: A string containing a cif or a series of cifs
transformations: New transformations to be applied to all
structures
primitive: Whether to generate the primitive cell from the cif.
extend_collection: Whether to use more than one output structure
from one-to-many transformations. extend_collection can be a
number, which determines the maximum branching for each
transformation.
"""
transformed_structures = []
lines = cif_string.split("\n")
structure_data = []
read_data = False
for line in lines:
if re.match(r"^\s*data", line):
structure_data.append([])
read_data = True
if read_data:
structure_data[-1].append(line)
for data in structure_data:
tstruct = TransformedStructure.from_cif_string("\n".join(data), [],
primitive)
transformed_structures.append(tstruct)
super(CifTransmuter, self).__init__(transformed_structures,
transformations, extend_collection)
@staticmethod
def from_filenames(filenames, transformations=None, primitive=True,
extend_collection=False):
"""
Generates a CifTransmuter from cif file(s), possibly
containing multiple structures.
Args:
filenames: List of strings of the cif files
transformations: New transformations to be applied to all
structures
primitive: Same meaning as in __init__.
extend_collection: Same meaning as in __init__.
"""
allcifs = []
for fname in filenames:
with open(fname, "r") as f:
allcifs.append(f.read())
return CifTransmuter("\n".join(allcifs), transformations,
primitive=primitive,
extend_collection=extend_collection)
class PoscarTransmuter(StandardTransmuter):
"""
Generates a transmuter from a sequence of POSCARs.
Args:
poscar_string: List of POSCAR strings
transformations: New transformations to be applied to all
structures.
extend_collection: Whether to use more than one output structure
from one-to-many transformations.
"""
def __init__(self, poscar_string, transformations=None,
extend_collection=False):
tstruct = TransformedStructure.from_poscar_string(poscar_string, [])
super(PoscarTransmuter, self).__init__([tstruct], transformations,
extend_collection=extend_collection)
@staticmethod
def from_filenames(poscar_filenames, transformations=None,
extend_collection=False):
"""
Convenience constructor to generate a POSCAR transmuter from a list of
POSCAR filenames.
Args:
poscar_filenames: List of POSCAR filenames
transformations: New transformations to be applied to all
structures.
extend_collection:
Same meaning as in __init__.
"""
tstructs = []
for filename in poscar_filenames:
with open(filename, "r") as f:
tstructs.append(TransformedStructure
.from_poscar_string(f.read(), []))
return StandardTransmuter(tstructs, transformations,
extend_collection=extend_collection)
def batch_write_vasp_input(transformed_structures, vasp_input_set=MPRelaxSet,
output_dir=".", create_directory=True,
subfolder=None,
include_cif=False, **kwargs):
"""
Batch write vasp input for a sequence of transformed structures to
output_dir, following the format output_dir/{group}/{formula}_{number}.
Args:
transformed_structures: Sequence of TransformedStructures.
vasp_input_set: pymatgen.io.vasp.sets.VaspInputSet used to create
vasp input files from structures.
output_dir: Directory to output files
create_directory (bool): Create the directory if not present.
Defaults to True.
subfolder: Function to create subdirectory name from
transformed_structure.
e.g., lambda x: x.other_parameters["tags"][0] to use the first
tag.
include_cif (bool): Boolean indicating whether to output a CIF as
well. CIF files are generally better supported in visualization
programs.
"""
for i, s in enumerate(transformed_structures):
formula = re.sub(r"\s+", "", s.final_structure.formula)
if subfolder is not None:
subdir = subfolder(s)
dirname = os.path.join(output_dir, subdir,
"{}_{}".format(formula, i))
else:
dirname = os.path.join(output_dir, "{}_{}".format(formula, i))
s.write_vasp_input(vasp_input_set, dirname,
create_directory=create_directory, **kwargs)
if include_cif:
from pymatgen.io.cif import CifWriter
writer = CifWriter(s.final_structure)
writer.write_file(os.path.join(dirname, "{}.cif".format(formula)))
def _apply_transformation(inputs):
"""
Helper method for multiprocessing of apply_transformation. Must not be
in the class so that it can be pickled.
Args:
inputs: Tuple containing the transformed structure, the transformation
to be applied, a boolean indicating whether to extend the
collection, and a boolean indicating whether to clear the redo
Returns:
List of output structures (the modified initial structure, plus
any new structures created by a one-to-many transformation)
"""
ts, transformation, extend_collection, clear_redo = inputs
new = ts.append_transformation(transformation, extend_collection,
clear_redo=clear_redo)
o = [ts]
if new:
o.extend(new)
return o
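# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration, not part of the original
# module). The CIF file names and the output directory are hypothetical;
# everything else uses only names defined or imported above plus two standard
# pymatgen imports.
if __name__ == "__main__":
    from pymatgen.core.structure import Structure
    from pymatgen.transformations.standard_transformations import \
        SubstitutionTransformation

    # Build a transmuter from plain Structures, substitute Fe -> Mn in every
    # structure, then write one VASP input directory per structure to
    # vasp_runs/{formula}_{index} using the default MPRelaxSet.
    structures = [Structure.from_file(f)
                  for f in ["LiFePO4.cif", "NaFePO4.cif"]]
    transmuter = StandardTransmuter.from_structures(
        structures,
        transformations=[SubstitutionTransformation({"Fe": "Mn"})])
    batch_write_vasp_input(transmuter.transformed_structures,
                           output_dir="vasp_runs")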
| xhqu1981/pymatgen | pymatgen/alchemy/transmuters.py | Python | mit | 16,333 | ["VASP", "pymatgen"] | b11a468463f980096850861d1614d0228c8bddaa8f63e9d6bcd9b47b52072649 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
GrassAlgorithm.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import time
import uuid
import importlib
import re
from PyQt4.QtCore import QCoreApplication
from PyQt4.QtGui import QIcon
from qgis.core import QgsRasterLayer
from qgis.utils import iface
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.ProcessingLog import ProcessingLog
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import getParameterFromString, ParameterVector, ParameterMultipleInput, ParameterExtent, ParameterNumber, ParameterSelection, ParameterRaster, ParameterTable, ParameterBoolean, ParameterString
from processing.core.outputs import getOutputFromString, OutputRaster, OutputVector, OutputFile, OutputHTML
from GrassUtils import GrassUtils
from processing.tools import dataobjects, system
pluginPath = os.path.normpath(os.path.join(
os.path.split(os.path.dirname(__file__))[0], os.pardir))
class GrassAlgorithm(GeoAlgorithm):
GRASS_OUTPUT_TYPE_PARAMETER = 'GRASS_OUTPUT_TYPE_PARAMETER'
GRASS_MIN_AREA_PARAMETER = 'GRASS_MIN_AREA_PARAMETER'
GRASS_SNAP_TOLERANCE_PARAMETER = 'GRASS_SNAP_TOLERANCE_PARAMETER'
GRASS_REGION_EXTENT_PARAMETER = 'GRASS_REGION_PARAMETER'
GRASS_REGION_CELLSIZE_PARAMETER = 'GRASS_REGION_CELLSIZE_PARAMETER'
GRASS_REGION_ALIGN_TO_RESOLUTION = '-a_r.region'
OUTPUT_TYPES = ['auto', 'point', 'line', 'area']
def __init__(self, descriptionfile):
GeoAlgorithm.__init__(self)
self.hardcodedStrings = []
self.descriptionFile = descriptionfile
self.defineCharacteristicsFromFile()
self.numExportedLayers = 0
def getCopy(self):
newone = GrassAlgorithm(self.descriptionFile)
newone.provider = self.provider
return newone
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'grass.png'))
def help(self):
return False, 'http://grass.osgeo.org/grass64/manuals/' + self.grassName + '.html'
def getParameterDescriptions(self):
descs = {}
_, helpfile = self.help()
try:
infile = open(helpfile)
lines = infile.readlines()
for i in range(len(lines)):
if lines[i].startswith('<DT><b>'):
for param in self.parameters:
searchLine = '<b>' + param.name + '</b>'
if searchLine in lines[i]:
i += 1
descs[param.name] = (lines[i])[4:-6]
break
infile.close()
except Exception:
pass
return descs
def defineCharacteristicsFromFile(self):
lines = open(self.descriptionFile)
line = lines.readline().strip('\n').strip()
self.grassName = line
line = lines.readline().strip('\n').strip()
self.name = line
self.i18n_name = QCoreApplication.translate("GrassAlgorithm", line)
if " - " not in self.name:
self.name = self.grassName + " - " + self.name
self.i18n_name = self.grassName + " - " + self.i18n_name
line = lines.readline().strip('\n').strip()
self.group = line
self.i18n_group = QCoreApplication.translate("GrassAlgorithm", line)
hasRasterOutput = False
hasVectorInput = False
vectorOutputs = 0
line = lines.readline().strip('\n').strip()
while line != '':
try:
line = line.strip('\n').strip()
if line.startswith('Hardcoded'):
self.hardcodedStrings.append(line[len('Hardcoded|'):])
elif line.startswith('Parameter'):
parameter = getParameterFromString(line)
self.addParameter(parameter)
if isinstance(parameter, ParameterVector):
hasVectorInput = True
if isinstance(parameter, ParameterMultipleInput) \
and parameter.datatype < 3:
hasVectorInput = True
elif line.startswith('*Parameter'):
param = getParameterFromString(line[1:])
param.isAdvanced = True
self.addParameter(param)
else:
output = getOutputFromString(line)
self.addOutput(output)
if isinstance(output, OutputRaster):
hasRasterOutput = True
elif isinstance(output, OutputVector):
vectorOutputs += 1
if isinstance(output, OutputHTML):
self.addOutput(OutputFile("rawoutput", output.description +
" (raw output)", "txt"))
line = lines.readline().strip('\n').strip()
except Exception as e:
ProcessingLog.addToLog(
ProcessingLog.LOG_ERROR,
self.tr('Could not open GRASS algorithm: %s.\n%s' % (self.descriptionFile, line)))
raise e
lines.close()
self.addParameter(ParameterExtent(
self.GRASS_REGION_EXTENT_PARAMETER,
self.tr('GRASS region extent'))
)
if hasRasterOutput:
self.addParameter(ParameterNumber(
self.GRASS_REGION_CELLSIZE_PARAMETER,
self.tr('GRASS region cellsize (leave 0 for default)'),
0, None, 0.0))
if hasVectorInput:
param = ParameterNumber(self.GRASS_SNAP_TOLERANCE_PARAMETER,
'v.in.ogr snap tolerance (-1 = no snap)',
-1, None, -1.0)
param.isAdvanced = True
self.addParameter(param)
param = ParameterNumber(self.GRASS_MIN_AREA_PARAMETER,
'v.in.ogr min area', 0, None, 0.0001)
param.isAdvanced = True
self.addParameter(param)
if vectorOutputs == 1:
param = ParameterSelection(self.GRASS_OUTPUT_TYPE_PARAMETER,
'v.out.ogr output type',
self.OUTPUT_TYPES)
param.isAdvanced = True
self.addParameter(param)
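# Illustrative sketch (hypothetical content, added for clarity, not an actual
# description file): defineCharacteristicsFromFile() above expects plain-text
# files of the form
#
#   r.example
#   r.example - Short display name of the algorithm
#   Raster (r.*)
#   ParameterRaster|input|Input layer|False
#   *ParameterNumber|steps|Number of steps|None|None|10
#   OutputRaster|output|Output layer
#
# i.e. the GRASS command name, the display name, the group, then one
# parameter/output definition per line ('Hardcoded|' lines are passed through
# verbatim, a leading '*' marks a parameter as advanced), terminated by a
# blank line. The exact field layout of each definition is delegated to
# getParameterFromString() / getOutputFromString().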
def getDefaultCellsize(self):
cellsize = 0
for param in self.parameters:
if param.value:
if isinstance(param, ParameterRaster):
if isinstance(param.value, QgsRasterLayer):
layer = param.value
else:
layer = dataobjects.getObjectFromUri(param.value)
cellsize = max(cellsize, (layer.extent().xMaximum()
- layer.extent().xMinimum())
/ layer.width())
elif isinstance(param, ParameterMultipleInput):
layers = param.value.split(';')
for layername in layers:
layer = dataobjects.getObjectFromUri(layername)
if isinstance(layer, QgsRasterLayer):
cellsize = max(cellsize, (
layer.extent().xMaximum()
- layer.extent().xMinimum())
/ layer.width()
)
if cellsize == 0:
cellsize = 100
return cellsize
def processAlgorithm(self, progress):
if system.isWindows():
path = GrassUtils.grassPath()
if path == '':
raise GeoAlgorithmExecutionException(
self.tr('GRASS folder is not configured.\nPlease '
'configure it before running GRASS algorithms.'))
commands = []
self.exportedLayers = {}
outputCommands = []
# If a GRASS session has been created outside of this algorithm, get the
# list of layers already loaded in GRASS; otherwise start a new session
existingSession = GrassUtils.sessionRunning
if existingSession:
self.exportedLayers = GrassUtils.getSessionLayers()
else:
GrassUtils.startGrassSession()
# 1: Export layer to grass mapset
for param in self.parameters:
if isinstance(param, ParameterRaster):
if param.value is None:
continue
value = param.value
# Check if the layer hasn't already been exported in, for
# example, previous GRASS calls in this session
if value in self.exportedLayers.keys():
continue
else:
self.setSessionProjectionFromLayer(value, commands)
commands.append(self.exportRasterLayer(value))
if isinstance(param, ParameterVector):
if param.value is None:
continue
value = param.value
if value in self.exportedLayers.keys():
continue
else:
self.setSessionProjectionFromLayer(value, commands)
commands.append(self.exportVectorLayer(value))
if isinstance(param, ParameterTable):
pass
if isinstance(param, ParameterMultipleInput):
if param.value is None:
continue
layers = param.value.split(';')
if layers is None or len(layers) == 0:
continue
if param.datatype == ParameterMultipleInput.TYPE_RASTER:
for layer in layers:
if layer in self.exportedLayers.keys():
continue
else:
self.setSessionProjectionFromLayer(layer, commands)
commands.append(self.exportRasterLayer(layer))
elif param.datatype == ParameterMultipleInput.TYPE_VECTOR_ANY:
for layer in layers:
if layer in self.exportedLayers.keys():
continue
else:
self.setSessionProjectionFromLayer(layer, commands)
commands.append(self.exportVectorLayer(layer))
self.setSessionProjectionFromProject(commands)
region = \
unicode(self.getParameterValue(self.GRASS_REGION_EXTENT_PARAMETER))
regionCoords = region.split(',')
command = 'g.region'
command += ' n=' + unicode(regionCoords[3])
command += ' s=' + unicode(regionCoords[2])
command += ' e=' + unicode(regionCoords[1])
command += ' w=' + unicode(regionCoords[0])
cellsize = self.getParameterValue(self.GRASS_REGION_CELLSIZE_PARAMETER)
if cellsize:
command += ' res=' + unicode(cellsize)
else:
command += ' res=' + unicode(self.getDefaultCellsize())
alignToResolution = \
self.getParameterValue(self.GRASS_REGION_ALIGN_TO_RESOLUTION)
if alignToResolution:
command += ' -a'
commands.append(command)
# 2: Set parameters and outputs
command = self.grassName
command += ' ' + ' '.join(self.hardcodedStrings)
for param in self.parameters:
if param.value is None or param.value == '':
continue
if param.name in [self.GRASS_REGION_CELLSIZE_PARAMETER, self.GRASS_REGION_EXTENT_PARAMETER,
self.GRASS_MIN_AREA_PARAMETER, self.GRASS_SNAP_TOLERANCE_PARAMETER,
self.GRASS_OUTPUT_TYPE_PARAMETER, self.GRASS_REGION_ALIGN_TO_RESOLUTION]:
continue
if isinstance(param, (ParameterRaster, ParameterVector)):
value = param.value
if value in self.exportedLayers.keys():
command += ' %s="%s"' % (param.name, self.exportedLayers[value])
else:
command += ' %s="%s"' % (param.name, value)
elif isinstance(param, ParameterMultipleInput):
s = param.value
for layer in self.exportedLayers.keys():
s = s.replace(layer, self.exportedLayers[layer])
s = s.replace(';', ',')
command += ' %s="%s"' % (param.name, s)
elif isinstance(param, ParameterBoolean):
if param.value:
command += ' ' + param.name
elif isinstance(param, ParameterSelection):
idx = int(param.value)
command += ' ' + param.name + '=' + unicode(param.options[idx])
elif isinstance(param, ParameterString):
command += ' ' + param.name + '="' + unicode(param.value) + '"'
else:
command += ' ' + param.name + '="' + unicode(param.value) + '"'
uniqueSufix = unicode(uuid.uuid4()).replace('-', '')
for out in self.outputs:
if isinstance(out, OutputFile):
command += ' > ' + out.value
elif not isinstance(out, OutputHTML):
# We add an output name to make sure it is unique if the session
# uses this algorithm several times.
uniqueOutputName = out.name + uniqueSufix
command += ' ' + out.name + '=' + uniqueOutputName
# Add output file to exported layers, to indicate that
# they are present in GRASS
self.exportedLayers[out.value] = uniqueOutputName
command += ' --overwrite'
commands.append(command)
# 3: Export resulting layers to a format that qgis can read
for out in self.outputs:
if isinstance(out, OutputRaster):
filename = out.getCompatibleFileName(self)
# Raster layer output: adjust region to layer before
# exporting
commands.append('g.region rast=' + out.name + uniqueSufix)
outputCommands.append('g.region rast=' + out.name
+ uniqueSufix)
if self.grassName == 'r.composite':
command = 'r.out.tiff -t --verbose'
command += ' input='
command += out.name + uniqueSufix
command += ' output="' + filename + '"'
commands.append(command)
outputCommands.append(command)
else:
command = 'r.out.gdal -c createopt="TFW=YES,COMPRESS=LZW"'
command += ' input='
if self.grassName == 'r.horizon':
command += out.name + uniqueSufix + '_0'
else:
command += out.name + uniqueSufix
command += ' output="' + filename + '"'
commands.append(command)
outputCommands.append(command)
if isinstance(out, OutputVector):
filename = out.getCompatibleFileName(self)
command = 'v.out.ogr -s -c -e -z input=' + out.name + uniqueSufix
command += ' dsn="' + os.path.dirname(filename) + '"'
command += ' format=ESRI_Shapefile'
command += ' olayer="%s"' % os.path.splitext(os.path.basename(filename))[0]
typeidx = \
self.getParameterValue(self.GRASS_OUTPUT_TYPE_PARAMETER)
outtype = ('auto' if typeidx
is None else self.OUTPUT_TYPES[typeidx])
command += ' type=' + outtype
commands.append(command)
outputCommands.append(command)
# 4: Run GRASS
loglines = []
loglines.append(self.tr('GRASS execution commands'))
for line in commands:
progress.setCommand(line)
loglines.append(line)
if ProcessingConfig.getSetting(GrassUtils.GRASS_LOG_COMMANDS):
ProcessingLog.addToLog(ProcessingLog.LOG_INFO, loglines)
GrassUtils.executeGrass(commands, progress, outputCommands)
for out in self.outputs:
if isinstance(out, OutputHTML):
with open(self.getOutputFromName("rawoutput").value) as f:
rawOutput = "".join(f.readlines())
with open(out.value, "w") as f:
f.write("<pre>%s</pre>" % rawOutput)
# If the session has been created outside of this algorithm, add the new
# GRASS layers to it; otherwise finish the session
if existingSession:
GrassUtils.addSessionLayers(self.exportedLayers)
else:
GrassUtils.endGrassSession()
def exportVectorLayer(self, orgFilename):
# TODO: improve this. We are now exporting if it is not a shapefile,
# but the functionality of v.in.ogr could be used for this.
# We also export if there is a selection
if not os.path.exists(orgFilename) or not orgFilename.endswith('shp'):
layer = dataobjects.getObjectFromUri(orgFilename, False)
if layer:
filename = dataobjects.exportVectorLayer(layer)
else:
layer = dataobjects.getObjectFromUri(orgFilename, False)
if layer:
useSelection = \
ProcessingConfig.getSetting(ProcessingConfig.USE_SELECTED)
if useSelection and layer.selectedFeatureCount() != 0:
filename = dataobjects.exportVectorLayer(layer)
else:
filename = orgFilename
else:
filename = orgFilename
destFilename = self.getTempFilename()
self.exportedLayers[orgFilename] = destFilename
command = 'v.in.ogr'
min_area = self.getParameterValue(self.GRASS_MIN_AREA_PARAMETER)
command += ' min_area=' + unicode(min_area)
snap = self.getParameterValue(self.GRASS_SNAP_TOLERANCE_PARAMETER)
command += ' snap=' + unicode(snap)
command += ' dsn="%s"' % os.path.dirname(filename)
command += ' layer="%s"' % os.path.splitext(os.path.basename(filename)[:-4])[0]
command += ' output=' + destFilename
command += ' --overwrite -o'
return command
def setSessionProjectionFromProject(self, commands):
if not GrassUtils.projectionSet:
proj4 = iface.mapCanvas().mapRenderer().destinationCrs().toProj4()
command = 'g.proj'
command += ' -c'
command += ' proj4="' + proj4 + '"'
commands.append(command)
GrassUtils.projectionSet = True
def setSessionProjectionFromLayer(self, layer, commands):
if not GrassUtils.projectionSet:
qGisLayer = dataobjects.getObjectFromUri(layer)
if qGisLayer:
proj4 = unicode(qGisLayer.crs().toProj4())
command = 'g.proj'
command += ' -c'
command += ' proj4="' + proj4 + '"'
commands.append(command)
GrassUtils.projectionSet = True
def exportRasterLayer(self, layer):
destFilename = self.getTempFilename()
self.exportedLayers[layer] = destFilename
if bool(re.match('netcdf', layer, re.I)) or bool(re.match('hdf', layer, re.I)):
command = 'r.in.gdal'
else:
command = 'r.external -r'
command += ' input="' + layer + '"'
command += ' band=1'
command += ' output=' + destFilename
command += ' --overwrite -o'
return command
def getTempFilename(self):
filename = 'tmp' + unicode(time.time()).replace('.', '') \
+ unicode(system.getNumExportedLayers())
return filename
def commandLineName(self):
return 'grass:' + self.name[:self.name.find(' ')]
def checkBeforeOpeningParametersDialog(self):
msg = GrassUtils.checkGrassIsInstalled()
if msg is not None:
return msg
def checkParameterValuesBeforeExecuting(self):
name = self.commandLineName().replace('.', '_')[len('grass:'):]
try:
module = importlib.import_module('processing.algs.grass.ext.' + name)
except ImportError:
return
if hasattr(module, 'checkParameterValuesBeforeExecuting'):
func = getattr(module, 'checkParameterValuesBeforeExecuting')
return func(self)
| landryb/QGIS | python/plugins/processing/algs/grass/GrassAlgorithm.py | Python | gpl-2.0 | 21,911 | ["NetCDF"] | 2d98a4ddfbcb85d16c15c23751101b38e4af16fbf535943d48630a70d3848e70 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
Module of helper functions for ccresponse distributed property calculations.
Defines functions for interacting with the database created by the run_XXX
driver function.
Properties that are able to use this module should be added to
the registered_props dictionary.
"""
import os
import collections
from psi4 import core
from psi4.driver import p4util
def generate_inputs(db,name):
"""
Generates the input files in each sub-directory of the
distributed finite differences property calculation.
db: (database) the database object associated with this property
calculation. On exit, db['inputs_generated'] has been set to True.
name: (string) method name passed to the calling driver.
Returns: nothing
Throws: Exception if the number of atomic displacements is not correct.
"""
molecule = core.get_active_molecule()
natom = molecule.natom()
# get list of displacements
displacement_geoms = core.atomic_displacements(molecule)
# Sanity Check
# there should be 3 coords * natom * 2 directions (+/-)
if not (6 * natom) == len(displacement_geoms):
raise Exception('The number of atomic displacements should be 6 times'
' the number of atoms!')
displacement_names = db['job_status'].keys()
for n, entry in enumerate(displacement_names):
if not os.path.exists(entry):
os.makedirs(entry)
# Setup up input file string
inp_template = 'molecule {molname}_{disp}'
inp_template += ' {{\n{molecule_info}\n}}\n{options}\n{jobspec}\n'
molecule.set_geometry(displacement_geoms[n])
molecule.fix_orientation(True)
molecule.fix_com(True)
inputfile = open('{0}/input.dat'.format(entry), 'w')
inputfile.write("# This is a psi4 input file auto-generated for"
"computing properties by finite differences.\n\n")
inputfile.write(
inp_template.format(
molname=molecule.name(),
disp=entry,
molecule_info=molecule.create_psi4_string_from_molecule(),
options=p4util.format_options_for_input(),
jobspec=db['prop_cmd']))
inputfile.close()
db['inputs_generated'] = True
# END generate_inputs
def initialize_database(database, name, prop, properties_array, additional_kwargs=None):
"""
Initialize the database for computation of some property
using distributed finite differences driver
database: (database) the database object passed from the caller
name: (string) name as passed to calling driver
prop: (string) the property being computed, used to add xxx_computed flag
to database
properties_array: (list of strings) properties to go in
properties kwarg of the property() cmd in each sub-dir
additional_kwargs: (list of strings) *optional*
any additional kwargs that should go in the call to the
property() driver method in each subdir
Returns: nothing
Throws: nothing
"""
database['inputs_generated'] = False
database['jobs_complete'] = False
prop_cmd ="property('{0}',".format(name)
prop_cmd += "properties=[ '{}' ".format(properties_array[0])
if len(properties_array) > 1:
for element in properties_array[1:]:
prop_cmd += ",'{}'".format(element)
prop_cmd += "]"
if additional_kwargs is not None:
for arg in additional_kwargs:
prop_cmd += ", {}".format(arg)
prop_cmd += ")"
database['prop_cmd'] = prop_cmd
database['job_status'] = collections.OrderedDict()
# Populate the job_status dict
molecule = core.get_active_molecule()
natom = molecule.natom()
coordinates = ['x', 'y', 'z']
#step_direction = ['p', 'm'] changing due to change in findif atomic_displacements
step_direction = ['m', 'p']
for atom in range(1, natom + 1):
for coord in coordinates:
for step in step_direction:
job_name = '{}_{}_{}'.format(atom, coord, step)
database['job_status'].update({job_name: 'not_started'})
database['{}_computed'.format(prop)] = False
# END initialize_database()
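# Illustrative note (not part of the Psi4 driver): for name='ccsd' and
# properties_array=['polarizability'], initialize_database() above produces
#   db['prop_cmd'] == "property('ccsd',properties=[ 'polarizability' ])"
# and fills db['job_status'] with 6 * natom keys of the form
# '{atom}_{coord}_{step}'; for a hypothetical 2-atom molecule these are
# '1_x_m', '1_x_p', '1_y_m', ..., '2_z_p', each initially 'not_started'.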
def stat(db):
"""
Checks displacement sub_directories for the status of each
displacement computation
db: (database) the database storing information for this distributed
property calculation
Returns: nothing
Throws: nothing
"""
n_finished = 0
for job, status in db['job_status'].items():
if status == 'finished':
n_finished += 1
elif status in ('not_started', 'running'):
try:
with open("{}/output.dat".format(job)) as outfile:
outfile.seek(-150, 2)
for line in outfile:
if 'Psi4 exiting successfully' in line:
db['job_status'][job] = 'finished'
n_finished += 1
break
else:
db['job_status'][job] = 'running'
except:
pass
# check all jobs done?
if n_finished == len(db['job_status'].keys()):
db['jobs_complete'] = True
# END stat()
| susilehtola/psi4 | psi4/driver/procrouting/findif_response_utils/db_helper.py | Python | lgpl-3.0 | 6,234 | ["Psi4"] | 9b08b26554a869b43c677e1f4e38ef94f1cacc9482cf669a840d89651a2f1e2c |
#!/usr/bin/env python
###############################################################################
# Copyright (C) 1994 - 2007, Performance Dynamics Company #
# #
# This software is licensed as described in the file COPYING, which #
# you should have received as part of this distribution. The terms #
# are also available at http://www.perfdynamics.com/Tools/copyright.html. #
# #
# You may opt to use, copy, modify, merge, publish, distribute and/or sell #
# copies of the Software, and permit persons to whom the Software is #
# furnished to do so, under the terms of the COPYING file. #
# #
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY #
# KIND, either express or implied. #
###############################################################################
#
# gross_te43.py
#
# Exercise 4.3 on p. 182 of Gross & Harris
# Same parameters as Ex. 4.2 but with 2 workload classes
# Solve traffic eqns using NumPy, rather than PDQ-MSQ
#
# Created by NJG on Sun, Aug 26, 2007
# Updated by NJG on Mon, Aug 27, 2007
import sys
from numpy import *
from numpy.linalg import solve
def ErlangC(servers, erlangs):
if (erlangs >= servers):
print "Error: %4.2f Erlangs > %d servers" % (erlangs, servers)
sys.exit()
rho = erlangs / servers
erlangB = erlangs / (1 + erlangs)
for mm in range(2, servers+1):
eb = erlangB
erlangB = eb * erlangs / (mm + (eb * erlangs))
erlangC = erlangB / (1 - rho + (rho * erlangB))
return(erlangC)
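# Added note (not in the original script): the loop above is the standard
# Erlang B recursion and its conversion to Erlang C. With offered load
# a = erlangs and m = servers,
#   B(1, a) = a / (1 + a),   B(k, a) = a*B(k-1, a) / (k + a*B(k-1, a))
#   C(m, a) = B(m, a) / (1 - rho + rho*B(m, a)),   rho = a / m
# ErlangC() therefore returns the probability that an arriving customer has
# to wait, which feeds the M/M/m queue-length formula used further below:
#   Q = m*rho + C(m, a) * rho / (1 - rho)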
# Traffic equations
"""
Let subscript 'a' denote type-1 customers and 'b' type-2 customers.
L = 35 ... external arrival rate per HOUR
The traffic equations can be obtained from the R matrices in G&H by
reading each column vertically in the order 1,2,3.
From the R_(1) matrix we get:
La1 = 0.00 La1 + 0.00 La2 + 0.00 La3 = 0.55 L
La2 = 1.00 La1 + 0.00 La2 + 0.00 La3
La3 = 0.00 La1 + 0.02 La2 + 0.00 La3
Rearrange terms to produce the Aa coefficient matrix below:
1.00 La1 + 0.00 La2 + 0.00 La3 = 0.55 L
1.00 La1 - 1.00 La2 + 0.00 La3 = 0.0
0.00 La1 + 0.02 La2 - 1.00 La3 = 0.0
Similarly, from the R_(2) matrix we get:
La1 = 0.00 La1 + 0.00 La2 + 0.00 La3 = 0.45 L
La2 = 0.00 La1 + 0.00 La2 + 0.01 La3
La3 = 1.00 La1 + 0.02 La2 + 0.00 La3
which, on rearrangement, gives Ab below:
1.00 La1 + 0.00 La2 + 0.00 La3 = 0.45 L
0.00 La1 - 1.00 La2 + 0.01 La3 = 0.0
1.00 La1 + 0.02 La2 - 1.00 La3 = 0.0
"""
# Matrices of coeffs
Aa = array([[1.00, 0.00, 0.00],
[1.00, -1.00, 0.00],
[0.00, 0.02, -1.00]])
Ab = array([[1.00, 0.00, 0.00],
[0.00, -1.00, 0.01],
[1.00, 0.00, -1.00]])
# Fraction of total traffic L going to 'a' and 'b' streams
fLa = 0.55 * 35
fLb = 0.45 * 35
# RHS of the traffic eqns
Ba = array([fLa, 0.0, 0.0])
Bb = array([fLb, 0.0, 0.0])
# Solve the traffic eqns for the local arrivals
La = solve(Aa, Ba)
Lb = solve(Ab, Bb)
print "Arrival ratesA: %7.4f %7.4f %7.4f" % (La[0], La[1], La[2])
print "Arrival ratesB: %7.4f %7.4f %7.4f" % (Lb[0], Lb[1], Lb[2])
# Server capacity
m = array([1, 3, 7])
print "Server cap: %7d %7d %7d" % (m[0], m[1], m[2])
# Visit ratios (v_kc = L_kc / Lc)
va = array([La[0]/fLa, La[1]/fLa, La[2]/fLa])
vb = array([Lb[0]/fLb, Lb[1]/fLb, Lb[2]/fLb])
print "Visit ratioA: %7.4f %7.4f %7.4f" % (va[0], va[1], va[2])
print "Expected V_a: %7.4f %7.4f %7.4f" % (19.25/fLa, 19.25/fLa, 0.385/fLa)
print "Visit ratioB: %7.4f %7.4f %7.4f" % (vb[0], vb[1], vb[2])
print "Expected V_b: %7.4f %7.4f %7.4f" % (15.75/fLb, 0.1575/fLb, 15.75/fLb)
# Service demands in HOURS (same for both classes at each node)
S = array([0.5/60, 6.0/60, 20.0/60])
Da = array([va[0] * S[0], va[1] * S[1], va[2] * S[2]])
Db = array([vb[0] * S[0], vb[1] * S[1], vb[2] * S[2]])
# Total utilization per server
rho = array([La[0]*Da[0] + Lb[0]*Db[0], (La[1]*Da[1] + Lb[1]*Db[1])/m[1], (La[2]*Da[2] + Lb[2]*Db[2])/m[2]])
print "Utilizations: %7.4f %7.4f %7.4f" % (rho[0], rho[1], rho[2])
# Queue lengths
Q0 = m[0]*rho[0] + ErlangC(m[0], m[0]*rho[0]) * (rho[0]/(1 - rho[0]))
Q1 = m[1]*rho[1] + ErlangC(m[1], m[1]*rho[1]) * (rho[1]/(1 - rho[1]))
Q2 = m[2]*rho[2] + ErlangC(m[2], m[2]*rho[2]) * (rho[2]/(1 - rho[2]))
#print "Queue length1 : %7.4f (Expected: 0.412)" % (rho[0] / (1 - rho[0]))
#print "Queue length1a: %7.4f (Expected: 0.227)" % (La[0] * Da[0] / (1 - rho[0]))
print "Queue length1 : %7.4f (Expected: 0.412)" % (Q0)
print "Queue length2 : %7.4f (Expected: 2.705)" % (Q1)
print "Queue length3 : %7.4f (Expected: 6.777)" % (Q2)
print "Queue length1a: %7.4f (Expected: 0.227)" % (Q0 * (La[0]/(La[0]+Lb[0])))
print "Queue length2a: %7.4f (Expected: 2.683)" % (Q1 * (La[1]/(La[1]+Lb[1])))
print "Queue length3a: %7.4f (Expected: 0.162)" % (Q2 * (La[2]/(La[2]+Lb[2])))
print "Queue length1b: %7.4f (Expected: 0.185)" % (Q0 * (Lb[0]/(La[0]+Lb[0])))
print "Queue length2b: %7.4f (Expected: 0.022)" % (Q1 * (Lb[1]/(La[1]+Lb[1])))
print "Queue length3b: %7.4f (Expected: 6.616)" % (Q2 * (Lb[2]/(La[2]+Lb[2])))
| peterlharding/PDQ | examples/MultiServer/gross_te43.py | Python | mit | 5,483 | ["VisIt"] | 24bd430ea86017450edb423b92751788f7a034499d047f2f730cc7de5e8fa1b5 |
import discord
from discord.ext import commands
from __main__ import send_cmd_help
from bs4 import BeautifulSoup
import random
class Animal:
"""sfw commands."""
def __init__(self, bot):
self.bot = bot
self.session = self.bot.http.session
@commands.group(pass_context=True)
async def sfw(self, ctx):
"""sfw Commands"""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@sfw.command(no_pm=True)
async def yandere(self):
"""Random Image From Yandere"""
try:
query = ("https://yande.re/post/random")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="highres").get("href")
await self.bot.say(image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(no_pm=True)
async def konachan(self):
"""Random Image From Konachan"""
try:
query = ("https://konachan.com/post/random")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="highres").get("href")
await self.bot.say(image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(no_pm=True)
async def e621(self):
"""Random Image From e621"""
try:
query = ("https://e621.net/post/random")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="highres").get("href")
await self.bot.say(image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(no_pm=True)
async def rule34(self):
"""Random Image From rule34"""
try:
query = ("http://rule34.xxx/index.php?page=post&s=random")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="image").get("src")
await self.bot.say('http:' + image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(no_pm=True)
async def danbooru(self):
"""Random Image From Danbooru"""
try:
query = ("http://danbooru.donmai.us/posts/random")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="image").get("src")
await self.bot.say('http://danbooru.donmai.us' + image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(no_pm=True)
async def gelbooru(self):
"""Random Image From Gelbooru"""
try:
query = ("http://www.gelbooru.com/index.php?page=post&s=random")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="image").get("src")
await self.bot.say(image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(no_pm=True)
async def tbib(self):
"""Random Image From DrunkenPumken"""
try:
query = ("http://www.tbib.org/index.php?page=post&s=random")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="image").get("src")
await self.bot.say("http:" + image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(no_pm=True)
async def xbooru(self):
"""Random Image From Xbooru"""
try:
query = ("http://xbooru.com/index.php?page=post&s=random")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="image").get("src")
await self.bot.say(image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(no_pm=True)
async def furrybooru(self):
"""Random Image From Furrybooru"""
try:
query = ("http://furry.booru.org/index.php?page=post&s=random")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="image").get("src")
await self.bot.say(image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(no_pm=True)
async def drunkenpumken(self):
"""Random Image From DrunkenPumken"""
try:
query = ("http://drunkenpumken.booru.org/index.php?page=post&s=random")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="image").get("src")
await self.bot.say(image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(no_pm=True)
async def lolibooru(self):
"""Random Image From Lolibooru"""
try:
query = ("https://lolibooru.moe/post/random/")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="image").get("src")
image = image.replace(' ','%20')
await self.bot.say(image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(pass_context=True, no_pm=True)
async def ysearch(self, ctx, *tags: str):
"""Search Yandere With A Tag"""
if tags == ():
await self.bot.say(":warning: Tags are missing.")
else:
try:
tags = ("+").join(tags)
query = ("https://yande.re/post.json?limit=42&tags=" + tags)
page = await self.session.get(query)
json = await page.json()
if json != []:
await self.bot.say(random.choice(json)['jpeg_url'])
else:
await self.bot.say(":warning: Yande.re has no images for requested tags.")
except Exception as e:
await self.bot.say(":x: `{}`".format(e))
def setup(bot):
n = Animal(bot)
bot.add_cog(n)
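# Possible refactoring sketch (added comment, not part of the original cog):
# every command above repeats the same fetch-and-parse steps, so they could
# share a single helper along these lines (URL, element id, attribute and
# prefix vary per site):
#
#   async def _random_image(self, url, element_id="image", attr="src",
#                           prefix=""):
#       page = await self.session.get(url)
#       soup = BeautifulSoup(await page.text(), 'html.parser')
#       await self.bot.say(prefix + soup.find(id=element_id).get(attr))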
| Vidyapoky/keksimus | animal/animal.py | Python | mit | 6,952 | ["MOE"] | 91a9a2afa277f401e9c27399165f76fae7e9d55031133c5e8e2f13ac24269b25 |
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mpp.models import SQLTestCase
class UAOHeapDMLTestCase(SQLTestCase):
"""
@tags ORCA
@gucs gp_create_table_random_default_distribution=off
"""
'''
Test queries on UAO tables
'''
sql_dir = 'sql/'
ans_dir = 'expected/'
out_dir = 'output/'
| CraigHarris/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/uao/uao_heap_dml/test_uao_heap_dml.py | Python | apache-2.0 | 959 | ["ORCA"] | 9f331be9f756b5e8ca3f2e37599b9e8dddcc4434aee73ca3874f6b6cd8f577fa |
#!/usr/bin/env python
from vtk import *
reader2 = vtkXMLTreeReader()
reader2.SetFileName("vtkclasses.xml")
reader2.Update()
reader3 = vtkXMLTreeReader()
reader3.SetFileName("vtklibrary.xml")
reader3.Update()
view2 = vtkIcicleView()
view2.SetRepresentationFromInput(reader2.GetOutput())
view2.SetAreaSizeArrayName("size")
view2.SetAreaColorArrayName("vertex id")
view2.SetAreaLabelArrayName("id")
view2.SetAreaLabelVisibility(True)
view2.SetAreaHoverArrayName("id")
view2.SetRootWidth(40.)
view2.SetLayerThickness(2.)
#view2.UseGradientColoring(False)
view2.Update()
view3 = vtkIcicleView()
view3.SetRepresentationFromInput(reader3.GetOutput())
view3.SetAreaSizeArrayName("size")
view3.SetAreaColorArrayName("vertex id")
view3.SetAreaLabelArrayName("id")
view3.SetAreaLabelVisibility(True)
view3.SetAreaHoverArrayName("id")
view3.SetRootWidth(20.)
view3.Update()
# Apply a theme to the views
theme = vtkViewTheme.CreateMellowTheme()
view2.ApplyViewTheme(theme)
view3.ApplyViewTheme(theme)
theme.FastDelete()
view2.ResetCamera()
view3.ResetCamera()
view2.Render()
view3.Render()
view2.GetInteractor().Initialize()
view3.GetInteractor().Initialize()
view2.GetInteractor().Start()
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/Examples/Infovis/Python/icicle_view.py | Python | gpl-3.0 | 1,184 | ["VTK"] | 78c1a4d877fe7c81e00dd5a5d02dae0d454e34c8145d3dedecf04187de2331dd |
#!/usr/bin/env python
import sys
from ase.test import NotAvailable
if sys.platform in ['win32']:
raise NotAvailable('Fails on Windows https://trac.fysik.dtu.dk/projects/ase/ticket/62')
from ase.visualize.vtk import requirevtk, probe_vtk_kilobyte
requirevtk()
vtk_kilobyte = probe_vtk_kilobyte(1024)
import numpy as np
import sys, unittest, gc
from ase.test import CustomTestCase, CustomTextTestRunner
from ase.utils.memory import MemoryStatistics, MemorySingleton, shapeopt
from vtk import vtkDataArray
from ase.visualize.vtk.data import vtkFloatArrayFromNumPyArray, \
vtkDoubleArrayFromNumPyArray, \
vtkFloatArrayFromNumPyMultiArray, \
vtkDoubleArrayFromNumPyMultiArray
# -------------------------------------------------------------------
class UTConversionDataArrayNumPy(CustomTestCase):
"""
Abstract class with test cases for VTK/NumPy data conversion.
Leak tests the six possible permutations of deletion order for the
objects involved in conversion between VTK and NumPy data formats.
Objects:
conv: instance of vtkDataArrayFromNumPyBuffer or a subclass thereof
The object in charge of the conversion
data: NumPy array
NumPy data with a specific memory footprint
vtk_da: instance of vtkDataArray or a subclass thereof
VTK data array with a similar memory footprint
Permutations:
Case A: 012 i.e. deletion order is conv, data, vtk_da
Case B: 021 i.e. deletion order is conv, vtk_da, data
Case C: 102 i.e. deletion order is data, conv, vtk_da
Case D: 120 i.e. deletion order is data, vtk_da, conv
Case E: 201 i.e. deletion order is vtk_da, conv, data
Case F: 210 i.e. deletion order is vtk_da, data, conv
"""
footprint = 100*1024**2
dtype = None
verbose = 0
gc_threshold = (300,5,5) #default is (700,10,10)
gc_flags = gc.DEBUG_LEAK # | gc.DEBUG_STATS
ctol = -7 #10MB
etol = -7 #10MB
def setUp(self):
self.mem_ini = MemorySingleton(self.verbose-1)
self.mem_ref = MemoryStatistics(self.verbose-1)
self.mem_cur = self.mem_ref.copy()
self.gc_threshold_old = gc.get_threshold()
self.gc_flags_old = gc.get_debug()
gc.set_threshold(*self.gc_threshold)
gc.set_debug(self.gc_flags)
# Try to obtain a clean slate
gc.collect()
self.gc_count = len(gc.garbage)
del gc.garbage[:]
def tearDown(self):
gc.collect()
self.assertEqual(len(gc.garbage), self.gc_count)
if len(gc.garbage)>0:
if self.verbose>1: print gc.get_objects()
#TODO be pedantic and fail?
del gc.garbage[:]
gc.set_threshold(*self.gc_threshold_old)
gc.set_debug(self.gc_flags_old)
def assertAlmostConsumed(self, bytes, digits=0, key='VmSize'):
self.mem_cur.update()
dm = self.mem_cur-self.mem_ref
self.assertAlmostEqual(dm[key], bytes, digits)
def assertAlmostExceeded(self, bytes, digits=0, key='VmPeak'):
self.mem_cur.update()
dm = self.mem_cur-self.mem_ini
#self.assertAlmostEqual(dm[key], bytes, digits) #TODO what really?
#self.assertAlmostEqual(max(0, dm[key]-bytes), 0, digits) #TODO ???
#dm = 200 MB, bytes = 100MB ok
#dm = 101 MB, bytes = 100MB ok
#dm = 0 MB, bytes = 100MB bad
def convert_to_vtk_array(self, data):
"""Convert an ndarray to a VTK data array.
data: NumPy array
NumPy data with a specific memory footprint
"""
raise RuntimeError('Virtual member function.')
def get_leaktest_scenario(self):
"""Construct the necessary conversion objects for leak testing.
Returns tuple of the form (conv, data, vtk_da,) where:
conv: instance of vtkDataArrayFromNumPyBuffer or a subclass thereof
The object in charge of the conversion
data: NumPy array
NumPy data with a specific memory footprint
vtk_da: instance of vtkDataArray or a subclass thereof
VTK data array with a similar memory footprint
"""
raise RuntimeError('Virtual member function.')
# =================================
def test_deletion_case_a(self):
# Case A: 012 i.e. deletion order is conv, data, vtk_da
(conv, data, vtk_da,) = self.get_leaktest_scenario()
# Conversion cleanup
del conv
self.assertAlmostConsumed(2*self.footprint, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'Conversion cleanup=', self.mem_cur-self.mem_ref
# NumPy cleanup
del data
self.assertAlmostConsumed(self.footprint, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'NumPy cleanup=', self.mem_cur-self.mem_ref
# VTK cleanup
del vtk_da
self.assertAlmostConsumed(0, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'VTK cleanup=', self.mem_cur-self.mem_ref
def test_deletion_case_b(self):
# Case B: 021 i.e. deletion order is conv, vtk_da, data
(conv, data, vtk_da,) = self.get_leaktest_scenario()
# Conversion cleanup
del conv
self.assertAlmostConsumed(2*self.footprint, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'Conversion cleanup=', self.mem_cur-self.mem_ref
# VTK cleanup
del vtk_da
self.assertAlmostConsumed(self.footprint, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'VTK cleanup=', self.mem_cur-self.mem_ref
# Numpy cleanup
del data
self.assertAlmostConsumed(0, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'NumPy cleanup=', self.mem_cur-self.mem_ref
def test_deletion_case_c(self):
# Case C: 102 i.e. deletion order is data, conv, vtk_da
(conv, data, vtk_da,) = self.get_leaktest_scenario()
# NumPy cleanup
del data
self.assertAlmostConsumed(self.footprint, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'NumPy cleanup=', self.mem_cur-self.mem_ref
# Conversion cleanup
del conv
self.assertAlmostConsumed(self.footprint, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'Conversion cleanup=', self.mem_cur-self.mem_ref
# VTK cleanup
del vtk_da
self.assertAlmostConsumed(0, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'VTK cleanup=', self.mem_cur-self.mem_ref
def test_deletion_case_d(self):
# Case D: 120 i.e. deletion order is data, vtk_da, conv
(conv, data, vtk_da,) = self.get_leaktest_scenario()
# NumPy cleanup
del data
self.assertAlmostConsumed(self.footprint, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'NumPy cleanup=', self.mem_cur-self.mem_ref
# VTK cleanup
del vtk_da
self.assertAlmostConsumed(self.footprint, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'VTK cleanup=', self.mem_cur-self.mem_ref
# Conversion cleanup
del conv
self.assertAlmostConsumed(0, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'Conversion cleanup=', self.mem_cur-self.mem_ref
def test_deletion_case_e(self):
# Case E: 201 i.e. deletion order is vtk_da, conv, data
(conv, data, vtk_da,) = self.get_leaktest_scenario()
# VTK cleanup
del vtk_da
self.assertAlmostConsumed(2*self.footprint, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'VTK cleanup=', self.mem_cur-self.mem_ref
# Conversion cleanup
del conv
self.assertAlmostConsumed(self.footprint, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'Conversion cleanup=', self.mem_cur-self.mem_ref
# NumPy cleanup
del data
self.assertAlmostConsumed(0, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'NumPy cleanup=', self.mem_cur-self.mem_ref
def test_deletion_case_f(self):
# Case F: 210 i.e. deletion order is vtk_da, data, conv
(conv, data, vtk_da,) = self.get_leaktest_scenario()
# VTK cleanup
del vtk_da
self.assertAlmostConsumed(2*self.footprint, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'VTK cleanup=', self.mem_cur-self.mem_ref
# NumPy cleanup
del data
self.assertAlmostConsumed(self.footprint, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'NumPy cleanup=', self.mem_cur-self.mem_ref
# Conversion cleanup
del conv
self.assertAlmostConsumed(0, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'Conversion cleanup=', self.mem_cur-self.mem_ref
# -------------------------------------------------------------------
# class UTDataArrayFromNumPyBuffer(...): TODO
# -------------------------------------------------------------------
class UTDataArrayFromNumPyArray_Scalar(UTConversionDataArrayNumPy):
"""
Test cases for memory leaks during VTK/NumPy data conversion.
Conversion of 1D NumPy array to VTK data array using buffers."""
def setUp(self):
UTConversionDataArrayNumPy.setUp(self)
self.shape = self.footprint//np.nbytes[self.dtype]
def get_leaktest_scenario(self):
self.assertAlmostEqual(np.prod(self.shape)*np.nbytes[self.dtype], \
self.footprint, -2) #100B
# Update memory reference
self.mem_ref.update()
# NumPy allocation
data = np.empty(self.shape, self.dtype)
self.assertAlmostConsumed(self.footprint, self.ctol)
self.assertAlmostExceeded(self.footprint, self.etol)
if self.verbose>=1: print 'NumPy allocation=', self.mem_cur-self.mem_ref
# NumPy to VTK conversion
np2da = self.convert_to_vtk_array(data)
self.assertAlmostConsumed(2*self.footprint, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'Conversion=', self.mem_cur-self.mem_ref
# VTK retrieval
vtk_da = np2da.get_output()
self.assertTrue(isinstance(vtk_da, vtkDataArray))
self.assertAlmostEqual(vtk_da.GetActualMemorySize()*vtk_kilobyte, \
self.footprint, -3) #1kB
if self.verbose>=1: print 'VTK retrieval=', self.mem_cur-self.mem_ref
return (np2da, data, vtk_da,)
class UTFloatArrayFromNumPyArray_Scalar(UTDataArrayFromNumPyArray_Scalar):
__doc__ = UTDataArrayFromNumPyArray_Scalar.__doc__
dtype = np.float32
convert_to_vtk_array = vtkFloatArrayFromNumPyArray
class UTDoubleArrayFromNumPyArray_Scalar(UTDataArrayFromNumPyArray_Scalar):
__doc__ = UTDataArrayFromNumPyArray_Scalar.__doc__
dtype = np.float64
convert_to_vtk_array = vtkDoubleArrayFromNumPyArray
class UTDataArrayFromNumPyArray_Vector(UTConversionDataArrayNumPy):
"""
Test cases for memory leaks during VTK/NumPy data conversion.
Conversion of 2D NumPy array to VTK data array using buffers."""
def setUp(self):
UTConversionDataArrayNumPy.setUp(self)
size = self.footprint//np.nbytes[self.dtype]
self.shape = (size//3, 3)
def get_leaktest_scenario(self):
self.assertAlmostEqual(np.prod(self.shape)*np.nbytes[self.dtype], \
self.footprint, -2) #100B
# Update memory reference
self.mem_ref.update()
# NumPy allocation
data = np.empty(self.shape, self.dtype)
self.assertAlmostConsumed(self.footprint, self.ctol)
self.assertAlmostExceeded(self.footprint, self.etol)
if self.verbose>=1: print 'NumPy allocation=', self.mem_cur-self.mem_ref
# NumPy to VTK conversion
np2da = self.convert_to_vtk_array(data)
self.assertAlmostConsumed(2*self.footprint, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'Conversion=', self.mem_cur-self.mem_ref
# VTK retrieval
vtk_da = np2da.get_output()
self.assertTrue(isinstance(vtk_da, vtkDataArray))
self.assertAlmostEqual(vtk_da.GetActualMemorySize()*vtk_kilobyte, \
self.footprint, -3) #1kB
if self.verbose>=1: print 'VTK retrieval=', self.mem_cur-self.mem_ref
return (np2da, data, vtk_da,)
class UTFloatArrayFromNumPyArray_Vector(UTDataArrayFromNumPyArray_Vector):
__doc__ = UTDataArrayFromNumPyArray_Vector.__doc__
dtype = np.float32
convert_to_vtk_array = vtkFloatArrayFromNumPyArray
class UTDoubleArrayFromNumPyArray_Vector(UTDataArrayFromNumPyArray_Vector):
__doc__ = UTDataArrayFromNumPyArray_Vector.__doc__
dtype = np.float64
convert_to_vtk_array = vtkDoubleArrayFromNumPyArray
# -------------------------------------------------------------------
class UTDataArrayFromNumPyMultiArray_Scalar(UTConversionDataArrayNumPy):
"""
Test cases for memory leaks during VTK/NumPy data conversion.
Conversion of NumPy grid scalars to VTK data array using buffers."""
def setUp(self):
UTConversionDataArrayNumPy.setUp(self)
size = self.footprint//np.nbytes[self.dtype]
digits, shape = shapeopt(1000, size, ndims=3, ecc=0.3)
if self.verbose>=1: print 'digits=%8.3f, shape=%s' % (digits,shape)
self.shape = shape + (1,)
self.assertAlmostEqual(np.prod(self.shape)*np.nbytes[self.dtype], \
self.footprint, -4) #10kB
def get_leaktest_scenario(self):
# Update memory reference
self.mem_ref.update()
# NumPy allocation
data = np.empty(self.shape, self.dtype)
self.assertAlmostConsumed(self.footprint, self.ctol)
self.assertAlmostExceeded(self.footprint, self.etol)
if self.verbose>=1: print 'NumPy allocation=', self.mem_cur-self.mem_ref
# NumPy to VTK conversion
np2da = self.convert_to_vtk_array(data)
self.assertAlmostConsumed(2*self.footprint, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'Conversion=', self.mem_cur-self.mem_ref
# VTK retrieval
vtk_da = np2da.get_output()
self.assertTrue(isinstance(vtk_da, vtkDataArray))
self.assertAlmostEqual(vtk_da.GetActualMemorySize()*vtk_kilobyte, \
self.footprint, -4) #10kB
if self.verbose>=1: print 'VTK retrieval=', self.mem_cur-self.mem_ref
return (np2da, data, vtk_da,)
class UTFloatArrayFromNumPyMultiArray_Scalar(UTDataArrayFromNumPyMultiArray_Scalar):
__doc__ = UTDataArrayFromNumPyMultiArray_Scalar.__doc__
dtype = np.float32
convert_to_vtk_array = vtkFloatArrayFromNumPyMultiArray
class UTDoubleArrayFromNumPyMultiArray_Scalar(UTDataArrayFromNumPyMultiArray_Scalar):
__doc__ = UTDataArrayFromNumPyMultiArray_Scalar.__doc__
dtype = np.float64
convert_to_vtk_array = vtkDoubleArrayFromNumPyMultiArray
class UTDataArrayFromNumPyMultiArray_Vector(UTConversionDataArrayNumPy):
"""
Test cases for memory leaks during VTK/NumPy data conversion.
Conversion of NumPy grid vectors to VTK data array using buffers."""
def setUp(self):
UTConversionDataArrayNumPy.setUp(self)
size = self.footprint//np.nbytes[self.dtype]
digits, shape = shapeopt(1000, size//3, ndims=3, ecc=0.3)
if self.verbose>=1: print 'digits=%8.3f, shape=%s' % (digits,shape)
self.shape = shape + (3,)
self.assertAlmostEqual(np.prod(self.shape)*np.nbytes[self.dtype], \
self.footprint, -4) #10kB
def get_leaktest_scenario(self):
# Update memory reference
self.mem_ref.update()
# NumPy allocation
data = np.empty(self.shape, self.dtype)
self.assertAlmostConsumed(self.footprint, self.ctol)
self.assertAlmostExceeded(self.footprint, self.etol)
if self.verbose>=1: print 'NumPy allocation=', self.mem_cur-self.mem_ref
# NumPy to VTK conversion
np2da = self.convert_to_vtk_array(data)
self.assertAlmostConsumed(2*self.footprint, self.ctol)
self.assertAlmostExceeded(2*self.footprint, self.etol)
if self.verbose>=1: print 'Conversion=', self.mem_cur-self.mem_ref
# VTK retrieval
vtk_da = np2da.get_output()
self.assertTrue(isinstance(vtk_da, vtkDataArray))
self.assertAlmostEqual(vtk_da.GetActualMemorySize()*vtk_kilobyte, \
self.footprint, -4) #10kB
if self.verbose>=1: print 'VTK retrieval=', self.mem_cur-self.mem_ref
return (np2da, data, vtk_da,)
class UTFloatArrayFromNumPyMultiArray_Vector(UTDataArrayFromNumPyMultiArray_Vector):
__doc__ = UTDataArrayFromNumPyMultiArray_Vector.__doc__
dtype = np.float32
convert_to_vtk_array = vtkFloatArrayFromNumPyMultiArray
class UTDoubleArrayFromNumPyMultiArray_Vector(UTDataArrayFromNumPyMultiArray_Vector):
__doc__ = UTDataArrayFromNumPyMultiArray_Vector.__doc__
dtype = np.float64
convert_to_vtk_array = vtkDoubleArrayFromNumPyMultiArray
# -------------------------------------------------------------------
if __name__ in ['__main__', '__builtin__']:
# We may have been imported by test.py, if so we should redirect to logfile
if __name__ == '__builtin__':
testrunner = CustomTextTestRunner('vtk_data.log', verbosity=2)
else:
testrunner = unittest.TextTestRunner(stream=sys.stdout, verbosity=2)
testcases = [UTFloatArrayFromNumPyArray_Scalar, \
UTDoubleArrayFromNumPyArray_Scalar, \
UTFloatArrayFromNumPyArray_Vector, \
UTDoubleArrayFromNumPyArray_Vector, \
UTFloatArrayFromNumPyMultiArray_Scalar, \
UTDoubleArrayFromNumPyMultiArray_Scalar, \
UTFloatArrayFromNumPyMultiArray_Vector, \
UTDoubleArrayFromNumPyMultiArray_Vector]
for test in testcases:
info = '\n' + test.__name__ + '\n' + test.__doc__.strip('\n') + '\n'
testsuite = unittest.defaultTestLoader.loadTestsFromTestCase(test)
testrunner.stream.writeln(info)
testresult = testrunner.run(testsuite)
# Provide feedback on failed tests if imported by test.py
if __name__ == '__builtin__' and not testresult.wasSuccessful():
raise SystemExit('Test failed. Check vtk_data.log for details.')
|
conwayje/ase-python
|
ase/test/vtk_data.py
|
Python
|
gpl-2.0
| 19,412
|
[
"ASE",
"VTK"
] |
795426ebb10378831a79cefe023aa702278dd58046f4f88ede235f29abb23467
|
#!/usr/bin/env python
# generate Python Manifest for the OpenEmbedded build system
# (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# (C) 2007 Jeremy Laine
# licensed under MIT, see COPYING.MIT
import os
import sys
import time
VERSION = "2.6.4"
__author__ = "Michael 'Mickey' Lauer <mlauer@vanille-media.de>"
__version__ = "20100711"
class MakefileMaker:
def __init__( self, outfile ):
"""initialize"""
self.packages = {}
self.targetPrefix = "${libdir}/python%s/" % VERSION[:3]
self.output = outfile
self.out( """
# WARNING: This file is AUTO GENERATED: Manual edits will be lost next time I regenerate the file.
# Generator: '%s' Version %s (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# Visit the Python for Embedded Systems Site => http://www.Vanille.de/projects/python.spy
""" % ( sys.argv[0], __version__ ) )
#
# helper functions
#
def out( self, data ):
"""print a line to the output file"""
self.output.write( "%s\n" % data )
def setPrefix( self, targetPrefix ):
"""set a file prefix for addPackage files"""
self.targetPrefix = targetPrefix
def doProlog( self ):
self.out( """ """ )
self.out( "" )
def addPackage( self, name, description, dependencies, filenames ):
"""add a package to the Makefile"""
if type( filenames ) == type( "" ):
filenames = filenames.split()
fullFilenames = []
for filename in filenames:
if filename[0] != "$":
fullFilenames.append( "%s%s" % ( self.targetPrefix, filename ) )
else:
fullFilenames.append( filename )
self.packages[name] = description, dependencies, fullFilenames
def doBody( self ):
"""generate body of Makefile"""
global VERSION
#
# generate provides line
#
provideLine = 'PROVIDES+="'
for name in self.packages:
provideLine += "%s " % name
provideLine += '"'
self.out( provideLine )
self.out( "" )
#
# generate package line
#
packageLine = 'PACKAGES="python-core-dbg '
for name in self.packages:
if name != 'python-core-dbg':
packageLine += "%s " % name
packageLine += 'python-modules"'
self.out( packageLine )
self.out( "" )
#
# generate package variables
#
for name, data in self.packages.iteritems():
desc, deps, files = data
#
# write out the description, revision and dependencies
#
self.out( 'DESCRIPTION_%s="%s"' % ( name, desc ) )
self.out( 'RDEPENDS_%s="%s"' % ( name, deps ) )
line = 'FILES_%s="' % name
#
# check which directories to make in the temporary directory
#
dirset = {} # if python had a set-datatype this would be sufficient. for now, we're using a dict instead.
for target in files:
dirset[os.path.dirname( target )] = True
#
# generate which files to copy for the target (-dfR because whole directories are also allowed)
#
for target in files:
line += "%s " % target
line += '"'
self.out( line )
self.out( "" )
self.out( 'DESCRIPTION_python-modules="All Python modules"' )
line = 'RDEPENDS_python-modules="'
for name, data in self.packages.iteritems():
if name != 'python-core-dbg':
line += "%s " % name
self.out( "%s \"" % line )
self.out( 'ALLOW_EMPTY_python-modules = "1"' )
def doEpilog( self ):
self.out( """""" )
self.out( "" )
def make( self ):
self.doProlog()
self.doBody()
self.doEpilog()
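# Added illustration (not emitted by the original generator): for a hypothetical
# call such as
#     m.addPackage( "python-foo", "Example package", "python-core",
#                   "foo.* lib-dynload/foo.so" )
# doBody() writes, with the default target prefix, roughly
#     DESCRIPTION_python-foo="Example package"
#     RDEPENDS_python-foo="python-core"
#     FILES_python-foo="${libdir}/python2.6/foo.* ${libdir}/python2.6/lib-dynload/foo.so "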
if __name__ == "__main__":
if len( sys.argv ) > 1:
os.popen( "rm -f ./%s" % sys.argv[1] )
outfile = file( sys.argv[1], "w" )
else:
outfile = sys.stdout
m = MakefileMaker( outfile )
# Add packages here. Only specify dlopen-style library dependencies here, no ldd-style dependencies!
# Parameters: name, description, dependencies, filenames
#
m.addPackage( "python-core", "Python Interpreter and core modules (needed!)", "",
"__future__.* _abcoll.* abc.* copy.* copy_reg.* ConfigParser.* " +
"genericpath.* getopt.* linecache.* new.* " +
"os.* posixpath.* struct.* " +
"warnings.* site.* stat.* " +
"UserDict.* UserList.* UserString.* " +
"lib-dynload/binascii.so lib-dynload/_struct.so lib-dynload/time.so " +
"lib-dynload/xreadlines.so types.* ${bindir}/python*" )
m.addPackage( "python-core-dbg", "Python core module debug information", "python-core",
"config/.debug lib-dynload/.debug ${bindir}/.debug ${libdir}/.debug" )
m.addPackage( "python-devel", "Python Development Package", "python-core",
"${includedir} ${libdir}/libpython2.6.so config" ) # package
m.addPackage( "python-idle", "Python Integrated Development Environment", "python-core python-tkinter",
"${bindir}/idle idlelib" ) # package
m.addPackage( "python-pydoc", "Python Interactive Help Support", "python-core python-lang python-stringold python-re",
"${bindir}/pydoc pydoc.*" )
m.addPackage( "python-smtpd", "Python Simple Mail Transport Daemon", "python-core python-netserver python-email python-mime",
"${bindir}/smtpd.*" )
m.addPackage( "python-audio", "Python Audio Handling", "python-core",
"wave.* chunk.* sndhdr.* lib-dynload/ossaudiodev.so lib-dynload/audioop.so" )
m.addPackage( "python-bsddb", "Python Berkeley Database Bindings", "python-core",
"bsddb lib-dynload/_bsddb.so" ) # package
m.addPackage( "python-codecs", "Python Codecs, Encodings & i18n Support", "python-core python-lang",
"codecs.* encodings gettext.* locale.* lib-dynload/_locale.so lib-dynload/unicodedata.so stringprep.* xdrlib.*" )
m.addPackage( "python-compile", "Python Bytecode Compilation Support", "python-core",
"py_compile.* compileall.*" )
m.addPackage( "python-compiler", "Python Compiler Support", "python-core",
"compiler" ) # package
m.addPackage( "python-compression", "Python High Level Compression Support", "python-core python-zlib",
"gzip.* zipfile.* tarfile.* lib-dynload/bz2.so" )
m.addPackage( "python-crypt", "Python Basic Cryptographic and Hashing Support", "python-core",
"hashlib.* md5.* sha.* lib-dynload/crypt.so lib-dynload/_hashlib.so lib-dynload/_sha256.so lib-dynload/_sha512.so" )
m.addPackage( "python-textutils", "Python Option Parsing, Text Wrapping and Comma-Separated-Value Support", "python-core python-io python-re python-stringold",
"lib-dynload/_csv.so csv.* optparse.* textwrap.*" )
m.addPackage( "python-curses", "Python Curses Support", "python-core",
"curses lib-dynload/_curses.so lib-dynload/_curses_panel.so" ) # directory + low level module
m.addPackage( "python-ctypes", "Python C Types Support", "python-core",
"ctypes lib-dynload/_ctypes.so" ) # directory + low level module
m.addPackage( "python-datetime", "Python Calendar and Time support", "python-core python-codecs",
"_strptime.* calendar.* lib-dynload/datetime.so" )
m.addPackage( "python-db", "Python File-Based Database Support", "python-core",
"anydbm.* dumbdbm.* whichdb.* " )
m.addPackage( "python-debugger", "Python Debugger", "python-core python-io python-lang python-re python-stringold python-shell python-pprint",
"bdb.* pdb.*" )
m.addPackage( "python-difflib", "Python helpers for computing deltas between objects.", "python-lang python-re",
"difflib.*" )
m.addPackage( "python-distutils", "Python Distribution Utilities", "python-core",
"config distutils" ) # package
m.addPackage( "python-doctest", "Python framework for running examples in docstrings.", "python-core python-lang python-io python-re python-unittest python-debugger python-difflib",
"doctest.*" )
# FIXME consider adding to some higher level package
m.addPackage( "python-elementtree", "Python elementree", "python-core",
"lib-dynload/_elementtree.so" )
m.addPackage( "python-email", "Python Email Support", "python-core python-io python-re python-mime python-audio python-image python-netclient",
"imaplib.* email" ) # package
m.addPackage( "python-fcntl", "Python's fcntl Interface", "python-core",
"lib-dynload/fcntl.so" )
m.addPackage( "python-hotshot", "Python Hotshot Profiler", "python-core",
"hotshot lib-dynload/_hotshot.so" )
m.addPackage( "python-html", "Python HTML Processing", "python-core",
"formatter.* htmlentitydefs.* htmllib.* markupbase.* sgmllib.* " )
m.addPackage( "python-gdbm", "Python GNU Database Support", "python-core",
"lib-dynload/gdbm.so" )
m.addPackage( "python-image", "Python Graphical Image Handling", "python-core",
"colorsys.* imghdr.* lib-dynload/imageop.so lib-dynload/rgbimg.so" )
m.addPackage( "python-io", "Python Low-Level I/O", "python-core python-math",
"lib-dynload/_socket.so lib-dynload/_ssl.so lib-dynload/select.so lib-dynload/termios.so lib-dynload/cStringIO.so " +
"pipes.* socket.* ssl.* tempfile.* StringIO.* " )
m.addPackage( "python-json", "Python JSON Support", "python-core python-math python-re",
"json" ) # package
m.addPackage( "python-lang", "Python Low-Level Language Support", "python-core",
"lib-dynload/_bisect.so lib-dynload/_collections.so lib-dynload/_heapq.so lib-dynload/_weakref.so lib-dynload/_functools.so " +
"lib-dynload/array.so lib-dynload/itertools.so lib-dynload/operator.so lib-dynload/parser.so " +
"atexit.* bisect.* code.* codeop.* collections.* dis.* functools.* heapq.* inspect.* keyword.* opcode.* symbol.* repr.* token.* " +
"tokenize.* traceback.* linecache.* weakref.*" )
m.addPackage( "python-logging", "Python Logging Support", "python-core python-io python-lang python-pickle python-stringold",
"logging" ) # package
m.addPackage( "python-mailbox", "Python Mailbox Format Support", "python-core python-mime",
"mailbox.*" )
m.addPackage( "python-math", "Python Math Support", "python-core",
"lib-dynload/cmath.so lib-dynload/math.so lib-dynload/_random.so random.* sets.*" )
m.addPackage( "python-mime", "Python MIME Handling APIs", "python-core python-io",
"mimetools.* uu.* quopri.* rfc822.*" )
m.addPackage( "python-mmap", "Python Memory-Mapped-File Support", "python-core python-io",
"lib-dynload/mmap.so " )
m.addPackage( "python-multiprocessing", "Python Multiprocessing Support", "python-core python-io python-lang",
"lib-dynload/_multiprocessing.so multiprocessing" ) # package
m.addPackage( "python-netclient", "Python Internet Protocol Clients", "python-core python-crypt python-datetime python-io python-lang python-logging python-mime",
"*Cookie*.* " +
"base64.* cookielib.* ftplib.* gopherlib.* hmac.* httplib.* mimetypes.* nntplib.* poplib.* smtplib.* telnetlib.* urllib.* urllib2.* urlparse.* uuid.* rfc822.* mimetools.*" )
m.addPackage( "python-netserver", "Python Internet Protocol Servers", "python-core python-netclient",
"cgi.* *HTTPServer.* SocketServer.*" )
m.addPackage( "python-numbers", "Python Number APIs", "python-core python-lang python-re",
"decimal.* numbers.*" )
m.addPackage( "python-pickle", "Python Persistence Support", "python-core python-codecs python-io python-re",
"pickle.* shelve.* lib-dynload/cPickle.so" )
m.addPackage( "python-pkgutil", "Python Package Extension Utility Support", "python-core",
"pkgutil.*")
m.addPackage( "python-pprint", "Python Pretty-Print Support", "python-core",
"pprint.*" )
m.addPackage( "python-profile", "Python Basic Profiling Support", "python-core python-textutils",
"profile.* pstats.* cProfile.* lib-dynload/_lsprof.so" )
m.addPackage( "python-re", "Python Regular Expression APIs", "python-core",
"re.* sre.* sre_compile.* sre_constants* sre_parse.*" ) # _sre is builtin
m.addPackage( "python-readline", "Python Readline Support", "python-core",
"lib-dynload/readline.so rlcompleter.*" )
m.addPackage( "python-resource", "Python Resource Control Interface", "python-core",
"lib-dynload/resource.so" )
m.addPackage( "python-shell", "Python Shell-Like Functionality", "python-core python-re",
"cmd.* commands.* dircache.* fnmatch.* glob.* popen2.* shlex.* shutil.*" )
m.addPackage( "python-robotparser", "Python robots.txt parser", "python-core python-netclient",
"robotparser.*")
m.addPackage( "python-subprocess", "Python Subprocess Support", "python-core python-io python-re python-fcntl python-pickle",
"subprocess.*" )
m.addPackage( "python-sqlite3", "Python Sqlite3 Database Support", "python-core python-datetime python-lang python-crypt python-io python-threading python-zlib",
"lib-dynload/_sqlite3.so sqlite3/dbapi2.* sqlite3/__init__.* sqlite3/dump.*" )
m.addPackage( "python-sqlite3-tests", "Python Sqlite3 Database Support Tests", "python-core python-sqlite3",
"sqlite3/test" )
m.addPackage( "python-stringold", "Python String APIs [deprecated]", "python-core python-re",
"lib-dynload/strop.so string.*" )
m.addPackage( "python-syslog", "Python Syslog Interface", "python-core",
"lib-dynload/syslog.so" )
m.addPackage( "python-terminal", "Python Terminal Controlling Support", "python-core python-io",
"pty.* tty.*" )
m.addPackage( "python-tests", "Python Tests", "python-core",
"test" ) # package
m.addPackage( "python-threading", "Python Threading & Synchronization Support", "python-core python-lang",
"_threading_local.* dummy_thread.* dummy_threading.* mutex.* threading.* Queue.*" )
m.addPackage( "python-tkinter", "Python Tcl/Tk Bindings", "python-core",
"lib-dynload/_tkinter.so lib-tk" ) # package
m.addPackage( "python-unittest", "Python Unit Testing Framework", "python-core python-stringold python-lang",
"unittest.*" )
m.addPackage( "python-unixadmin", "Python Unix Administration Support", "python-core",
"lib-dynload/nis.so lib-dynload/grp.so lib-dynload/pwd.so getpass.*" )
m.addPackage( "python-xml", "Python basic XML support.", "python-core python-re",
"lib-dynload/pyexpat.so xml xmllib.*" ) # package
m.addPackage( "python-xmlrpc", "Python XMLRPC Support", "python-core python-xml python-netserver python-lang",
"xmlrpclib.* SimpleXMLRPCServer.*" )
m.addPackage( "python-zlib", "Python zlib Support.", "python-core",
"lib-dynload/zlib.so" )
m.addPackage( "python-mailbox", "Python Mailbox Format Support", "python-core python-mime",
"mailbox.*" )
m.make()
|
libo/openembedded
|
contrib/python/generate-manifest-2.6.py
|
Python
|
mit
| 14,959
|
[
"VisIt"
] |
2388d5889245c0db287337b126fdeeea8ad8347980b23d9787ef1978750c4907
|
#! /usr/bin/env python
"""
Script to pre-average data using a sliding Gaussian kernel in frequency
"""
import argparse
from argparse import RawTextHelpFormatter
import numpy as np
import sys
import os
from scipy.ndimage.filters import gaussian_filter1d as gfilter
from scipy.special import erf
import casacore.tables as pt
import pickle
import itertools
import logging
def main(ms_input, input_colname, output_data_colname, output_weights_colname,
baseline_file, delta_theta_deg, target_peak_reduction_factor=0.99):
"""
Pre-average data using a sliding Gaussian kernel in frequency
Parameters
----------
ms_input : str
MS filename
input_colname : str
Name of the column in the MS from which the data are read
output_data_colname : str
Name of the column in the MS into which the averaged data are written
output_weights_colname : str
Name of the column in the MS into which the averaged data weights are
written
baseline_file : str
Filename of pickled baseline lengths
delta_theta_deg : float
Radius of calibration region in degrees
target_peak_reduction_factor : float, optional
Target reduction in peak flux density. Note: this reduction is in
addition to any incurred by earlier averaging
"""
if os.path.exists(baseline_file):
f = open(baseline_file, 'r')
baseline_dict = pickle.load(f)
f.close()
else:
print('Cannot find baseline_file. Exiting...')
sys.exit(1)
delta_theta_deg = float(delta_theta_deg)
target_peak_reduction_factor = float(target_peak_reduction_factor)
ms = pt.table(ms_input, readonly=False, ack=False)
ant1_list = ms.getcol('ANTENNA1')
ant2_list = ms.getcol('ANTENNA2')
data_all = ms.getcol(input_colname)
weights_all = ms.getcol('WEIGHT_SPECTRUM')
flags = ms.getcol('FLAG')
# Get lowest frequency of MS and channel width
sw = pt.table(ms_input+'::SPECTRAL_WINDOW', ack=False)
freq_hz = sw.col('CHAN_FREQ')[0][0]
chan_width_hz = sw.col('CHAN_WIDTH')[0][0]
flags[ np.isnan(data_all) ] = True # flag NaNs
weights_all = weights_all * ~flags # set weight of flagged data to 0
# Check that all NaNs are flagged
if np.count_nonzero(np.isnan(data_all[~flags])) > 0:
logging.error('NaNs in unflagged data in {0}!'.format(ms_input))
sys.exit(1)
# Weight data and set bad data to 0 so nans do not propagate
data_all = np.nan_to_num(data_all*weights_all)
# Iteration on baseline combination
for ant in itertools.product(set(ant1_list), set(ant2_list)):
if ant[0] >= ant[1]:
continue
sel1 = np.where(ant1_list == ant[0])[0]
sel2 = np.where(ant2_list == ant[1])[0]
sel_list = sorted(list(frozenset(sel1).intersection(sel2)))
data = data_all[sel_list,:,:]
weights = weights_all[sel_list,:,:]
# compute the Gaussian sigma from the max bandwidth over which we
# can average and avoid significant bandwidth smearing but limited to
# no more than 3 MHz (to avoid smoothing over the beam-induced effects)
lambda_km = 299792.458 / freq_hz
dist_km = baseline_dict['{0}-{1}'.format(ant[0], ant[1])]
if dist_km > 0:
resolution_deg = lambda_km / dist_km * 180.0 / np.pi
stddev_hz = min(3e6, get_target_bandwidth(freq_hz, delta_theta_deg,
resolution_deg, target_peak_reduction_factor)/4.0)
stddev_nchan = stddev_hz / chan_width_hz * np.sqrt(0.5 / dist_km)
# smear weighted data and weights
dataR = gfilter(np.real(data), stddev_nchan, axis=1)
dataI = gfilter(np.imag(data), stddev_nchan, axis=1)
weights = gfilter(weights, stddev_nchan, axis=1)
# re-create data
data = (dataR + 1j * dataI)
data[(weights != 0)] /= weights[(weights != 0)] # avoid divbyzero
data_all[sel_list,:,:] = data
weights_all[sel_list,:,:] = weights
# Add the output columns if needed
if output_data_colname not in ms.colnames():
desc = ms.getcoldesc(input_colname)
desc['name'] = output_data_colname
ms.addcols(desc)
if output_weights_colname not in ms.colnames():
desc = ms.getcoldesc('WEIGHT_SPECTRUM')
desc['name'] = output_weights_colname
ms.addcols(desc)
ms.putcol(output_data_colname, data_all)
ms.putcol('FLAG', flags) # this saves flags of nans, which is always good
ms.putcol(output_weights_colname, weights_all)
ms.close()
def get_bandwidth_smearing_factor(freq, delta_freq, delta_theta, resolution):
"""
Returns peak flux density reduction factor due to bandwidth smearing
Parameters
----------
freq : float
Frequency at which averaging will be done
delta_freq : float
Bandwidth over which averaging will be done
delta_theta : float
Distance from phase center
resolution : float
Resolution of restoring beam
Returns
-------
reduction_factor : float
Ratio of post-to-pre averaging peak flux density
"""
beta = (delta_freq/freq) * (delta_theta/resolution)
gamma = 2*(np.log(2)**0.5)
reduction_factor = ((np.pi**0.5)/(gamma * beta)) * (erf(beta*gamma/2.0))
return reduction_factor
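# Added note (not in the original script): in symbols, the factor computed above is
#     R = sqrt(pi) / (gamma * beta) * erf(gamma * beta / 2),
# with beta = (delta_freq / freq) * (delta_theta / resolution) and
# gamma = 2 * sqrt(ln 2).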
def get_target_bandwidth(freq, delta_theta, resolution, reduction_factor):
"""
Returns the bandwidth for given peak flux density reduction factor
Parameters
----------
freq : float
Frequency at which averaging will be done
delta_theta : float
Distance from phase center
resolution : float
Resolution of restoring beam
reduction_factor : float
Ratio of post-to-pre averaging peak flux density
Returns
-------
delta_freq : float
Bandwidth over which averaging will be done
"""
# Increase delta_freq until we drop below target reduction_factor
delta_freq = 1e-3 * freq
while get_bandwidth_smearing_factor(freq, delta_freq, delta_theta,
resolution) > reduction_factor:
delta_freq *= 1.1
return delta_freq
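def _example_target_bandwidth():
    """Hedged usage sketch added for illustration only; it is not called by the
    Factor pipeline and the numbers below are made-up example values.
    Shows how get_target_bandwidth() numerically inverts
    get_bandwidth_smearing_factor() by growing the bandwidth in 10% steps."""
    freq = 150e6                 # observing frequency in Hz (example value)
    delta_theta = 0.1            # distance from the phase centre in degrees
    resolution = 20.0 / 3600.0   # 20 arcsec restoring beam, in degrees
    target = 0.99                # requested peak flux reduction factor
    bw = get_target_bandwidth(freq, delta_theta, resolution, target)
    factor = get_bandwidth_smearing_factor(freq, bw, delta_theta, resolution)
    return bw, factor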
|
lofar-astron/factor
|
factor/scripts/pre_average_freq.py
|
Python
|
gpl-2.0
| 6,208
|
[
"Gaussian"
] |
6d13e8acc00d0d34f33f2d2ac8c92b5c041beb8653b643099c0dfdfa99c48976
|
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2014-2017 The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import bpy
from bpy.types import NodeSocket, Node
from ...util import asUpdate
from ..materials import AppleseedMatLayerProps
from . import AppleseedNode, AppleseedSocket
class AppleseedAshikhminReflectanceSocket(NodeSocket, AppleseedSocket):
bl_idname = "AppleseedAshikhminReflectance"
bl_label = "Diffuse Reflectance"
socket_value = AppleseedMatLayerProps.ashikhmin_reflectance
def draw(self, context, layout, node, text):
if self.is_output or self.is_linked:
layout.label(text)
else:
layout.prop(self, "socket_value", text=text)
def draw_color(self, context, node):
return 0.8, 0.8, 0.5, 1.0
class AppleseedAshikhminMultiplierSocket(NodeSocket, AppleseedSocket):
bl_idname = "AppleseedAshikhminMultiplier"
bl_label = "Diffuse Multiplier"
socket_value = AppleseedMatLayerProps.ashikhmin_multiplier
def draw(self, context, layout, node, text):
if self.is_output or self.is_linked:
layout.label(text)
else:
layout.prop(self, "socket_value", text=text)
def draw_color(self, context, node):
return 0.5, 0.5, 0.5, 1.0
class AppleseedAshikhminGlossySocket(NodeSocket, AppleseedSocket):
bl_idname = "AppleseedAshikhminGlossy"
bl_label = "Glossy Reflectance"
socket_value = AppleseedMatLayerProps.ashikhmin_glossy
def draw(self, context, layout, node, text):
if self.is_output or self.is_linked:
layout.label(text)
else:
layout.prop(self, "socket_value", text=text)
def draw_color(self, context, node):
return 0.5, 0.5, 0.5, 1.0
class AppleseedAshikhminUSocket(NodeSocket, AppleseedSocket):
bl_idname = "AppleseedAshikhminU"
bl_label = "Shininess U"
socket_value = AppleseedMatLayerProps.ashikhmin_shininess_u
def draw(self, context, layout, node, text):
if self.is_output or self.is_linked:
layout.label(text)
else:
layout.prop(self, "socket_value", text=text)
def draw_color(self, context, node):
return 0.5, 0.5, 0.5, 1.0
class AppleseedAshikhminVSocket(NodeSocket, AppleseedSocket):
bl_idname = "AppleseedAshikhminV"
bl_label = "Shininess V"
socket_value = AppleseedMatLayerProps.ashikhmin_shininess_v
def draw(self, context, layout, node, text):
if self.is_output or self.is_linked:
layout.label(text)
else:
layout.prop(self, "socket_value", text=text)
def draw_color(self, context, node):
return 0.5, 0.5, 0.5, 1.0
class AppleseedAshikhminFresnelSocket(NodeSocket, AppleseedSocket):
bl_idname = "AppleseedAshikhminFresnel"
bl_label = "Fresnel"
socket_value = bpy.props.FloatProperty(name="Fresnel Multiplier",
description="Ashikhmin fresnel multiplier",
default=1,
min=0,
max=1)
def draw(self, context, layout, node, text):
if self.is_output or self.is_linked:
layout.label(text)
else:
layout.prop(self, "socket_value", text=text)
def draw_color(self, context, node):
return 0.5, 0.5, 0.5, 1.0
class AppleseedAshikhminNode(Node, AppleseedNode):
bl_idname = "AppleseedAshikhminNode"
bl_label = "Ashikhmin-Shirley BRDF"
bl_icon = 'SMOOTH'
node_type = 'ashikhmin'
def init(self, context):
self.inputs.new('AppleseedAshikhminReflectance', "Reflectance")
self.inputs.new('AppleseedAshikhminMultiplier', "Multiplier")
self.inputs.new('AppleseedAshikhminGlossy', "Glossy Reflectance")
self.inputs.new('AppleseedAshikhminU', "Shininess U")
self.inputs.new('AppleseedAshikhminV', "Shininess V")
self.inputs.new('AppleseedAshikhminFresnel', "Fresnel Multiplier")
self.outputs.new('NodeSocketShader', "BRDF")
def draw_buttons(self, context, layout):
pass
def draw_buttons_ext(self, context, layout):
pass
def copy(self, node):
pass
def free(self):
asUpdate("Removing node ", self)
def draw_label(self):
return self.bl_label
def register():
bpy.utils.register_class(AppleseedAshikhminMultiplierSocket)
bpy.utils.register_class(AppleseedAshikhminReflectanceSocket)
bpy.utils.register_class(AppleseedAshikhminGlossySocket)
bpy.utils.register_class(AppleseedAshikhminUSocket)
bpy.utils.register_class(AppleseedAshikhminVSocket)
bpy.utils.register_class(AppleseedAshikhminFresnelSocket)
bpy.utils.register_class(AppleseedAshikhminNode)
def unregister():
bpy.utils.unregister_class(AppleseedAshikhminNode)
bpy.utils.unregister_class(AppleseedAshikhminMultiplierSocket)
bpy.utils.unregister_class(AppleseedAshikhminReflectanceSocket)
bpy.utils.unregister_class(AppleseedAshikhminGlossySocket)
bpy.utils.unregister_class(AppleseedAshikhminUSocket)
bpy.utils.unregister_class(AppleseedAshikhminVSocket)
bpy.utils.unregister_class(AppleseedAshikhminFresnelSocket)
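# Hedged usage sketch (added for illustration, not part of blenderseed): once
# register() has run, the node can be created like any other Blender node in
# an appleseed-compatible material node tree, e.g.
#     node = node_tree.nodes.new("AppleseedAshikhminNode")
#     node.inputs["Shininess U"].socket_value = 200.0
# where `node_tree` is assumed to be an existing appleseed material node tree.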
|
jasperges/blenderseed
|
properties/nodes/ashikhmin_brdf.py
|
Python
|
mit
| 6,446
|
[
"VisIt"
] |
ff11bc389d4cc35a496febe55739a742b81cbc4a3f28272bcb398cb2108f3ce5
|
import os
import glob
import inspect
import fnmatch
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset as ncfile
try:
import seawater.gibbs as gsw
import seawater.csiro as csw
except ImportError:
pass # module doesn't exist, deal with it.
#import alti_tools as atools
from scipy import interpolate
from warnings import warn
from altimetry.tools import recale_limits, in_limits, cumulative_distance, calcul_distance, \
where_list, \
cnes_convert, \
plot_map, \
get_caller
from collections import OrderedDict
#Additional functions
def load_ncVar(varName, nc=None, **kwargs):
if (nc is None) : raise Exception('No Netcdf file passed')
#Load variable
var = nc.variables[varName]
var.set_auto_maskandscale(False)
#Load dimensions
varDim = [str(dim) for dim in var.dimensions]
missDim=len(varDim) == 0
if (missDim): warn('No dimension found')
else : varDimval = [len(nc.dimensions[dimname]) for dimname in varDim]
#Load Attributes
attrStr=var.__dict__
ind_list = [] #Init index list
dims = OrderedDict({'_ndims':0}) #Init dimensions
dstr=[]
shape=()
#Construct index list
#looping on variable dimension list
for vid,vn in enumerate(varDim) :
#No indexation on current dimension
if not kwargs.has_key(vn) :
ind_list.append(xrange(varDimval[vid]))
dims.update({vn:varDimval[vid]})
#Data is indexed along current dimension
else :
dumind = kwargs[vn]
# if not isinstance(dumind,list) : dumind=tuple(dumind)
if isinstance(dumind,np.ndarray) : dumind=dumind.tolist() #Rq: tolist() can take a very long time to run on large arrays
if type(dumind) is not list : dumind=[dumind]
ind_list.append(dumind)
# ind_list=(ind_list,dumind) if len(ind_list) != 0 else (dumind,)
dims.update({vn:len(dumind)})
#check index list
sz=[len(i) for i in ind_list]
#find empty dimensions
if not (where_list([0],sz)[0] == -1 ) : varOut=var[[0]][[]] #np.array(where_list(sz,[0])) == -1
else :
varOut=var[ind_list]#.copy() #THIS IS LONG!!
if var.shape == (1,1) : varOut=varOut.reshape(var.shape)
#Mask it!
if var.__dict__.has_key('_FillValue') : mask=varOut == var._FillValue
elif var.__dict__.has_key('missing_value') : mask=varOut == var.missing_value
else : mask=np.zeros(varOut.shape,dtype='bool')
#Scale it
#note : we do not use the *= or += operators to force casting to scaling attribute types
if var.__dict__.has_key('scale') : varOut =varOut * var.scale
elif var.__dict__.has_key('scale_factor') : varOut = varOut * var.scale_factor
if var.__dict__.has_key('add_offset') : varOut = varOut + var.add_offset
#Set masks properly
if isinstance(varOut,np.ndarray) : varOut=np.ma.masked_array(varOut,mask=mask)
elif isinstance(varOut,np.ma.masked_array) : var.mask=mask
else : raise TypeError('This data type {} has not been defined - code it!'.format(type(varOut)))
#Get attributes
attrStr=var.__dict__
attrStr.pop('_FillValue',None) #Remove this attributed as it is overidden
#Append attributes to varOut
varOut.__dict__.update(attrStr)
#Build up output structure
outStr={'_dimensions':dims,'data':varOut}
dims.update({'_ndims':len(dims.keys()[1:])})
return outStr
# ind_list=[[]]
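def _example_load_ncVar(path):
    """Hedged usage sketch added for illustration only (not part of the
    original module).  The variable name 'sla' and the dimension name 'time'
    are hypothetical; substitute names that exist in your NetCDF file."""
    nc = ncfile(path, 'r')
    try:
        # Read records 0-2 along the 'time' dimension for variable 'sla'
        out = load_ncVar('sla', nc=nc, time=[0, 1, 2])
    finally:
        nc.close()
    return out['data'], out['_dimensions']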
|
rdussurget/py-altimetry
|
altimetry/data/hydro_nc.py
|
Python
|
lgpl-3.0
| 3,983
|
[
"NetCDF"
] |
3ee91b9ff4dc0f1786e099cd00854fe7184240f5772ede1291b29e8bdaf79ec2
|
#!/usr/bin/env python3
# Copyright 2016 (c) Brian McKean
'''
File name: getHost.py
Author: Brian McKean
Date created: 08/31/2016
Date last modified: 09/02/2016
Python Version: 3.5
Gets a list of IPs and dns names for a set of ec2 instances
input: None = gets all hosts
names = gets a list of hosts that match the name qualifier
output: stdout list of hosts by
ip_address dns_name # host_name
'''
from ec2restsend import ec2RestSend
import pprint
import xmltodict
import re
import sys
def getHost(match_string=None):
e = ec2RestSend()
ans = e.sendEc2Rest('DescribeInstances')
if (match_string):
m = re.compile(match_string)
if (ans):
match = False
ans_dict = xmltodict.parse(ans)
# Uncomment print if you want to see all data transferred
#pprint.pprint(ans_dict)
ret = "# Set of instances in AWS \n"
for i in range(len(ans_dict["DescribeInstancesResponse"]["reservationSet"]["item"])):
dns = ans_dict["DescribeInstancesResponse"]["reservationSet"]["item"][i]["instancesSet"]["item"]["dnsName"]
ipAddr = ans_dict["DescribeInstancesResponse"]["reservationSet"]["item"][i]["instancesSet"]["item"]["ipAddress"]
instanceName = ans_dict["DescribeInstancesResponse"]["reservationSet"]["item"][i]["instancesSet"]["item"]["tagSet"]["item"]["value"]
if (match_string is None):
match = True
ret += ipAddr + "\t" + dns + "\t" + "# " + instanceName + "\n"
else:
if (m.search(instanceName)):
match = True
ret += ipAddr + "\t" + dns + "\t" + "# " + instanceName + "\n"
if (match == False):
if match_string is None:
ret += "No instances were returned\n"
else:
ret += "No instances matching "+match_string+" were found\n"
return "##----------------------------------------------------##\n"+ret
else:
return "Got no response\n"
############
if __name__ == "__main__":
ans = "Error processing getHost.py"
if (len(sys.argv) > 2):
ans = "usage: getHost.py [match_string]"
elif (len(sys.argv) == 2):
match_string = sys.argv[1]
ans = getHost(match_string)
else:
ans = getHost()
print(ans)
|
co-bri/quevedo
|
v2_scripts/gethost.py
|
Python
|
mit
| 2,422
|
[
"Brian"
] |
09de2ef0773acd793671ae00eb2332f1afdfe0541050edb047c5be858124e7c2
|
## Generating simulated data for variations in the object parameters:
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import scipy.io
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import random
### User-defined functions
def rigid_fixed(K_robot, K_rf):
Robot_Home_Position = 0.00
Robot_Current_Position = 0.60
time = np.arange(0.00,1.21,0.01)
eqbm_point_1 = 0.50
eqbm_point_2 = 0.75
eqbm_point_3 = 0.90
dist_eqbm_pt2 = np.arange(Robot_Current_Position,eqbm_point_2,0.01)
dist_eqbm_pt3 = np.arange(eqbm_point_2+0.01,eqbm_point_3,0.01)
dist = np.concatenate((dist_eqbm_pt2,dist_eqbm_pt3),axis=0)
#print len(dist)
#print len(time)
applied_force_rf = np.zeros((len(dist),1))
deform_rf = np.zeros((len(dist),1))
sensed_force_rf = np.zeros((len(time),1))
robot_pos_rf = np.zeros((len(time),1))
for i in range(len(robot_pos_rf)):
robot_pos_rf[i] = Robot_Current_Position
for i in range(1,len(dist)):
applied_force_rf[i] = K_robot*(dist[i] - Robot_Current_Position)
deform_rf[i] = applied_force_rf[i]/K_rf
if i == 1:
if (Robot_Current_Position + deform_rf[i]) <= dist[i]:
Robot_Current_Position = Robot_Current_Position + deform_rf[i]
else:
Robot_Current_Position = Robot_Current_Position + (dist[i] - Robot_Current_Position)
else:
if (Robot_Current_Position + deform_rf[i]) <= dist[i]:
Robot_Current_Position = Robot_Current_Position + deform_rf[i] - deform_rf[i-1]
else:
Robot_Current_Position = Robot_Current_Position + (dist[i] - dist[i-1])
sensed_force_rf[i] = K_rf*(Robot_Current_Position - 0.60)
robot_pos_rf[i] = Robot_Current_Position
for i in range(len(dist),len(time)):
#print i
sensed_force_rf[i] = sensed_force_rf[i-1]
robot_pos_rf[i] = Robot_Current_Position
force = sum(sensed_force_rf.tolist(),[])
pos = sum(robot_pos_rf.tolist(),[])
#print force
#print pos
#print np.shape(pos)
#print np.shape(force)
return pos,force
def soft_fixed(K_robot, K_sf):
Robot_Home_Position = 0.00
Robot_Current_Position = 0.60
time = np.arange(0.00,1.21,0.01)
eqbm_point_1 = 0.50
eqbm_point_2 = 0.75
eqbm_point_3 = 0.90
dist_eqbm_pt2 = np.arange(Robot_Current_Position,eqbm_point_2,0.01)
dist_eqbm_pt3 = np.arange(eqbm_point_2+0.01,eqbm_point_3,0.01)
dist = np.concatenate((dist_eqbm_pt2,dist_eqbm_pt3),axis=0)
applied_force_sf = np.zeros((len(dist),1))
deform_sf = np.zeros((len(dist),1))
sensed_force_sf = np.zeros((len(time),1))
robot_pos_sf = np.zeros((len(time),1))
for i in range(len(robot_pos_sf)):
robot_pos_sf[i] = Robot_Current_Position
for i in range(1,len(dist)):
applied_force_sf[i] = K_robot*(dist[i] - Robot_Current_Position)
deform_sf[i] = applied_force_sf[i]/K_sf
if i == 1:
if (Robot_Current_Position + deform_sf[i]) <= dist[i]:
Robot_Current_Position = Robot_Current_Position + deform_sf[i]
else:
Robot_Current_Position = Robot_Current_Position + (dist[i] - Robot_Current_Position)
else:
if (Robot_Current_Position + deform_sf[i]) <= dist[i]:
Robot_Current_Position = Robot_Current_Position + deform_sf[i] - deform_sf[i-1]
else:
Robot_Current_Position = Robot_Current_Position + (dist[i] - dist[i-1])
sensed_force_sf[i] = K_sf*(Robot_Current_Position - 0.60)
robot_pos_sf[i] = Robot_Current_Position
for i in range(len(dist),len(time)):
sensed_force_sf[i] = sensed_force_sf[i-1]
robot_pos_sf[i] = Robot_Current_Position
force = sum(sensed_force_sf.tolist(),[])
pos = sum(robot_pos_sf.tolist(),[])
return pos,force
def rigid_movable(K_robot, K_rm, Mass_rm, mu_static_rigid, mu_dynamic_rigid):
Robot_Home_Position = 0.00
Robot_Current_Position = 0.60
time = np.arange(0.00,1.21,0.01)
g = 9.81
eqbm_point_1 = 0.50
eqbm_point_2 = 0.75
eqbm_point_3 = 0.90
dist_eqbm_pt2 = np.arange(Robot_Current_Position,eqbm_point_2,0.01)
dist_eqbm_pt3 = np.arange(eqbm_point_2+0.01,eqbm_point_3,0.01)
dist = np.concatenate((dist_eqbm_pt2,dist_eqbm_pt3),axis=0)
applied_force_rm = np.zeros((len(dist),1))
deform_rm = np.zeros((len(dist),1))
acc_rm = np.zeros((len(dist),1))
vel_rm = np.zeros((len(dist),1))
pos_rm = np.zeros((len(time),1))
sensed_force_rm = np.zeros((len(time),1))
robot_pos_rm = np.zeros((len(time),1))
for i in range(len(robot_pos_rm)):
robot_pos_rm[i] = Robot_Current_Position
stat_force = Mass_rm*g*mu_static_rigid
index = 1
for i in range(1,len(dist)):
if index == 1:
applied_force_rm[i] = K_robot*(dist[i] - Robot_Current_Position)
deform_rm[i] = applied_force_rm[i]/K_rm
if i == 1:
if (Robot_Current_Position + deform_rm[i]) <= dist[i]:
Robot_Current_Position = Robot_Current_Position + deform_rm[i]
else:
Robot_Current_Position = Robot_Current_Position + (dist[i] - Robot_Current_Position)
else:
if (Robot_Current_Position + deform_rm[i]) <= dist[i]:
Robot_Current_Position = Robot_Current_Position + deform_rm[i] - deform_rm[i-1]
else:
Robot_Current_Position = Robot_Current_Position + (dist[i] - dist[i-1])
sensed_force_rm[i] = K_rm*(Robot_Current_Position - 0.60)
else:
applied_force_rm[i] = K_rm*(dist[i] - Robot_Current_Position)
if (applied_force_rm[i] <= stat_force) and (index == 1):
sensed_force_rm[i] = sensed_force_rm[i]
acc_rm[i] = 0
vel_rm[i] = 0
pos_rm[i] = 0
Robot_Current_Position = Robot_Current_Position
else:
net_force_rm = applied_force_rm[i] - Mass_rm*g*mu_dynamic_rigid
sensed_force_rm[i] = Mass_rm*g*mu_dynamic_rigid
if net_force_rm < 0:
net_force_rm = 0
acc_rm[i] = 0
vel_rm[i] = 0
pos_rm[i] = 0
Robot_Current_Position = Robot_Current_Position
else:
acc_rm[i] = net_force_rm/Mass_rm
vel_rm[i] = vel_rm[i-1]+acc_rm[i]*0.01
pos_rm[i] = pos_rm[i-1]+vel_rm[i]*0.01
if (Robot_Current_Position + pos_rm[i] - pos_rm[i-1]) <= dist[i]:
Robot_Current_Position = Robot_Current_Position + pos_rm[i] - pos_rm[i-1]
else:
Robot_Current_Position = Robot_Current_Position + dist[i] - dist[i-1]
index = index+1
robot_pos_rm[i] = Robot_Current_Position
for i in range(len(dist),len(time)):
sensed_force_rm[i] = sensed_force_rm[i-1]
pos_rm[i] = pos_rm[i-1]
robot_pos_rm[i] = Robot_Current_Position
force = sum(sensed_force_rm.tolist(),[])
pos = sum(robot_pos_rm.tolist(),[])
return pos,force
def soft_movable(K_robot, K_sm, Mass_sm, mu_static_soft, mu_dynamic_soft):
Robot_Home_Position = 0.00
Robot_Current_Position = 0.60
time = np.arange(0.00,1.21,0.01)
g = 9.81
eqbm_point_1 = 0.50
eqbm_point_2 = 0.75
eqbm_point_3 = 0.90
dist_eqbm_pt2 = np.arange(Robot_Current_Position,eqbm_point_2,0.01)
dist_eqbm_pt3 = np.arange(eqbm_point_2+0.01,eqbm_point_3,0.01)
dist = np.concatenate((dist_eqbm_pt2,dist_eqbm_pt3),axis=0)
applied_force_sm = np.zeros((len(dist),1))
deform_sm = np.zeros((len(dist),1))
acc_sm = np.zeros((len(dist),1))
vel_sm = np.zeros((len(dist),1))
pos_sm = np.zeros((len(time),1))
sensed_force_sm = np.zeros((len(time),1))
robot_pos_sm = np.zeros((len(time),1))
for i in range(len(robot_pos_sm)):
robot_pos_sm[i] = Robot_Current_Position
stat_force = Mass_sm*g*mu_static_soft
index = 1
for i in range(1,len(dist)):
if index == 1:
applied_force_sm[i] = K_robot*(dist[i] - Robot_Current_Position)
deform_sm[i] = applied_force_sm[i]/K_sm
if i == 1:
if (Robot_Current_Position + deform_sm[i]) <= dist[i]:
Robot_Current_Position = Robot_Current_Position + deform_sm[i]
else:
Robot_Current_Position = Robot_Current_Position + (dist[i] - Robot_Current_Position)
else:
if (Robot_Current_Position + deform_sm[i]) <= dist[i]:
Robot_Current_Position = Robot_Current_Position + deform_sm[i] - deform_sm[i-1]
else:
Robot_Current_Position = Robot_Current_Position + (dist[i] - dist[i-1])
sensed_force_sm[i] = K_sm*(Robot_Current_Position - 0.60)
else:
applied_force_sm[i] = K_sm*(dist[i] - Robot_Current_Position)
if (applied_force_sm[i] <= stat_force) and (index == 1):
sensed_force_sm[i] = sensed_force_sm[i]
acc_sm[i] = 0
vel_sm[i] = 0
pos_sm[i] = 0
Robot_Current_Position = Robot_Current_Position
else:
net_force_sm = applied_force_sm[i] - Mass_sm*g*mu_dynamic_soft
sensed_force_sm[i] = Mass_sm*g*mu_dynamic_soft
if net_force_sm < 0:
net_force_sm = 0
acc_sm[i] = 0
vel_sm[i] = 0
pos_sm[i] = 0
Robot_Current_Position = Robot_Current_Position
else:
acc_sm[i] = net_force_sm/Mass_sm
vel_sm[i] = vel_sm[i-1]+acc_sm[i]*0.01
pos_sm[i] = pos_sm[i-1]+vel_sm[i]*0.01
if (Robot_Current_Position + pos_sm[i] - pos_sm[i-1]) <= dist[i]:
Robot_Current_Position = Robot_Current_Position + pos_sm[i] - pos_sm[i-1]
else:
Robot_Current_Position = Robot_Current_Position + dist[i] - dist[i-1]
index = index+1
robot_pos_sm[i] = Robot_Current_Position
for i in range(len(dist),len(time)):
sensed_force_sm[i] = sensed_force_sm[i-1]
pos_sm[i] = pos_sm[i-1]
robot_pos_sm[i] = Robot_Current_Position
force = sum(sensed_force_sm.tolist(),[])
pos = sum(robot_pos_sm.tolist(),[])
return pos,force
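def _example_single_trial():
    """Hedged illustration only (not part of the original experiment): generate
    one force/position trace per object category using example stiffness, mass
    and friction values similar to those swept in the main program."""
    pos_rf, force_rf = rigid_fixed(100, 5000.0)
    pos_sf, force_sf = soft_fixed(100, 25.0)
    pos_rm, force_rm = rigid_movable(100, 1000.0, 2.2, 0.5, 0.2)
    pos_sm, force_sm = soft_movable(100, 300.0, 0.4, 0.25, 0.1)
    return {'rigid_fixed': (pos_rf, force_rf),
            'soft_fixed': (pos_sf, force_sf),
            'rigid_movable': (pos_rm, force_rm),
            'soft_movable': (pos_sm, force_sm)}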
### Main Program
if __name__ == '__main__':
time = np.arange(0.00,1.21,0.01)
# For Rigid-Fixed
K_robot = 100
K_rf = np.zeros((100,1))
for i in range(100):
K_rf[i] = 1000*(i+1)
row_rf = np.size(K_rf,0)
samples = len(time)
#print samples
trials_rf = row_rf
robot_pos_rf = np.zeros((trials_rf,samples))
#print np.shape(robot_pos_rf)
sensed_force_rf = np.zeros((trials_rf,samples))
#print np.shape(robot_pos_rf)
k=0
for i in range(100):
#print k
#print np.shape(robot_pos_rf[k,:])
robot_pos_rf[k,:],sensed_force_rf[k,:] = rigid_fixed(K_robot, K_rf[i])
k = k+1
# For Soft-Fixed
K_robot = 100
K_sf = np.zeros((100,1))
for i in range(100):
K_sf[i] = 0.5*(i+1)
row_sf = np.size(K_sf,0)
samples = len(time)
trials_sf = row_sf
robot_pos_sf = np.zeros((trials_sf,samples))
sensed_force_sf = np.zeros((trials_sf,samples))
k=0
for i in range(100):
robot_pos_sf[k,:], sensed_force_sf[k,:] = soft_fixed(K_robot, K_sf[i])
k = k+1
# For Rigid_Movable
K_robot = 100
K_rm = np.zeros((5,1))
Mass_rm = [2.0, 2.1, 2.2, 2.3, 2.4]
mu_static_rigid = [0.45, 0.55]
mu_dynamic_rigid = [0.15, 0.2]
for i in range(5):
K_rm[i] = 500*(i+1)
row_K_rm = np.size(K_rm,0)
row_Mass_rm = np.size(Mass_rm,0)
row_mu_static_rm = len(mu_static_rigid)
row_mu_dynamic_rm = len(mu_dynamic_rigid)
samples = len(time)
trials_rm = row_K_rm*row_Mass_rm*row_mu_static_rm*row_mu_dynamic_rm
robot_pos_rm = np.zeros((trials_rm,samples))
sensed_force_rm = np.zeros((trials_rm,samples))
p=0
for j in range(5):
for k in range(5):
for m in range(2):
for n in range(2):
#print p
robot_pos_rm[p,:], sensed_force_rm[p,:] = rigid_movable(K_robot, K_rm[j], Mass_rm[k], mu_static_rigid[m], mu_dynamic_rigid[n])
p=p+1
# For Soft-Movable
K_robot = 100
K_sm = np.zeros((5,1))
Mass_sm = [0.3, 0.35, 0.4, 0.45, 0.5]
mu_static_soft = [0.15, 0.35]
mu_dynamic_soft = [0.05, 0.1]
for i in range(5):
K_sm[i] = 100*(i+1)
row_K_sm = np.size(K_sm,0)
row_Mass_sm = np.size(Mass_sm,0)
row_mu_static_sm = len(mu_static_soft)
row_mu_dynamic_sm = len(mu_dynamic_soft)
samples = len(time)
trials_sm = row_K_sm*row_Mass_sm*row_mu_static_sm*row_mu_dynamic_sm
robot_pos_sm = np.zeros((trials_sm,samples))
sensed_force_sm = np.zeros((trials_sm,samples))
p=0
for j in range(5):
for k in range(5):
for m in range(2):
for n in range(2):
robot_pos_sm[p,:], sensed_force_sm[p,:] = soft_movable(K_robot, K_sm[j], Mass_sm[k], mu_static_soft[m], mu_dynamic_soft[n])
p=p+1
# Store data
rf_data = {}
rm_data = {}
sf_data = {}
sm_data = {}
rf_data['sensed_force_rf'] = sensed_force_rf
rm_data['sensed_force_rm'] = sensed_force_rm
sf_data['sensed_force_sf'] = sensed_force_sf
sm_data['sensed_force_sm'] = sensed_force_sm
rf_data['robot_pos_rf'] = robot_pos_rf
rm_data['robot_pos_rm'] = robot_pos_rm
sf_data['robot_pos_sf'] = robot_pos_sf
sm_data['robot_pos_sm'] = robot_pos_sm
scipy.io.savemat('rigid_fixed_object_training.mat',rf_data)
scipy.io.savemat('rigid_movable_object_training.mat',rm_data)
scipy.io.savemat('soft_fixed_object_training.mat',sf_data)
scipy.io.savemat('soft_movable_object_training.mat',sm_data)
# Load data
data_rf = scipy.io.loadmat('rigid_fixed_object_training.mat')
data_sf = scipy.io.loadmat('soft_fixed_object_training.mat')
data_rm = scipy.io.loadmat('rigid_movable_object_training.mat')
data_sm = scipy.io.loadmat('soft_movable_object_training.mat')
dataforce_rf = np.transpose(data_rf['sensed_force_rf'])
dataforce_sf = np.transpose(data_sf['sensed_force_sf'])
dataforce_rm = np.transpose(data_rm['sensed_force_rm'])
dataforce_sm = np.transpose(data_sm['sensed_force_sm'])
datamotion_rf = np.transpose(data_rf['robot_pos_rf'])
datamotion_sf = np.transpose(data_sf['robot_pos_sf'])
datamotion_rm = np.transpose(data_rm['robot_pos_rm'])
datamotion_sm = np.transpose(data_sm['robot_pos_sm'])
# Plot data
# Force
mpu.figure(1)
pp.subplot(221)
pp.title('Rigid Fixed',fontsize='24')
pp.xlabel('Time (s)',fontsize='24')
pp.ylabel('Force (N)',fontsize='24')
pp.plot(time, dataforce_rf, linewidth=3.0)
pp.xlim((0.0, 1.3))
pp.grid('True')
pp.subplot(222)
pp.title('Soft Fixed',fontsize='24')
pp.xlabel('Time (s)',fontsize='24')
pp.ylabel('Force (N)',fontsize='24')
pp.plot(time, dataforce_sf, linewidth=3.0)
pp.xlim((0.0, 1.3))
pp.grid('True')
pp.subplot(223)
pp.title('Rigid Movable',fontsize='24')
pp.xlabel('Time (s)',fontsize='24')
pp.ylabel('Force (N)',fontsize='24')
pp.plot(time, dataforce_rm, linewidth=3.0)
pp.xlim((0.0, 1.3))
pp.grid('True')
pp.subplot(224)
pp.title('Soft Movable',fontsize='24')
pp.xlabel('Time (s)',fontsize='24')
pp.ylabel('Force (N)',fontsize='24')
pp.plot(time, dataforce_sm, linewidth=3.0)
pp.xlim((0.0, 1.3))
pp.grid('True')
# Position
mpu.figure(2)
pp.subplot(221)
pp.title('Rigid Fixed',fontsize='24')
pp.xlabel('Position (m)',fontsize='24')
pp.ylabel('Force (N)',fontsize='24')
pp.plot(datamotion_rf, dataforce_rf, linewidth=3.0)
pp.xlim((0.5, 1.0))
pp.grid('True')
pp.subplot(222)
pp.title('Soft Fixed',fontsize='24')
pp.xlabel('Position (m)',fontsize='24')
pp.ylabel('Force (N)',fontsize='24')
pp.plot(datamotion_sf, dataforce_sf, linewidth=3.0)
pp.xlim((0.5, 1.0))
pp.grid('True')
pp.subplot(223)
pp.title('Rigid Movable',fontsize='24')
pp.xlabel('Position (m)',fontsize='24')
pp.ylabel('Force (N)',fontsize='24')
pp.plot(datamotion_rm, dataforce_rm, linewidth=3.0)
pp.xlim((0.5, 1.0))
pp.grid('True')
pp.subplot(224)
pp.title('Soft Movable',fontsize='24')
pp.xlabel('Position (m)',fontsize='24')
pp.ylabel('Force (N)',fontsize='24')
pp.plot(datamotion_sm, dataforce_sm, linewidth=3.0)
pp.xlim((0.5, 1.0))
pp.grid('True')
pp.show()
|
tapomayukh/projects_in_python
|
classification/Classification_with_HMM/Single_Contact_Classification/simulation_results/comparision_with_kNN_PCA/Combined/object_training/gen_data_object_training_new.py
|
Python
|
mit
| 17,324
|
[
"Mayavi"
] |
ab99d483cc1ebc53ea2f3e1d4ee907bd339db69f9dafc3f932e5ee14a1340e8a
|
# Implements a Gaussian process latent-variable model.
# The (high-dimensional) data, Y is explained by some low-dimensional latent
# data X, warped by a function drawn from a GP prior (f). So Y = f(X), but
# we don't know X or f.
#
# In this example, we optimize X and the hyperparameters of the GP, but
# we integrate over all possible functions f.
#
# David Duvenaud (duvenaud@gmail.com)
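# Added note (not from the original author): with latent points X (N x Q) and
# data Y (N x D), each output dimension d is modelled by an independent GP, so
# the objective minimised below is
#     -sum_d log p(Y[:, d] | X, theta_d)  -  sum_{n,q} log N(X[n, q]; 0, 1),
# i.e. the negative GP marginal likelihoods plus a standard-normal prior on
# the latent coordinates.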
from __future__ import absolute_import
from __future__ import print_function
import matplotlib.pyplot as plt
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import value_and_grad
from scipy.optimize import minimize
from autograd.scipy.stats import norm
from gaussian_process import make_gp_funs, rbf_covariance
def make_pinwheel_data(num_classes, num_per_class, rate=2.0, noise_std=0.001):
spoke_angles = np.linspace(0, 2*np.pi, num_classes+1)[:-1]
rs = npr.RandomState(0)
x = np.linspace(0.1, 1, num_per_class)
xs = np.concatenate([rate *x * np.cos(angle + x * rate) + noise_std * rs.randn(num_per_class)
for angle in spoke_angles])
ys = np.concatenate([rate *x * np.sin(angle + x * rate) + noise_std * rs.randn(num_per_class)
for angle in spoke_angles])
return np.concatenate([np.expand_dims(xs, 1), np.expand_dims(ys,1)], axis=1)
if __name__ == '__main__':
data_dimension = 2 # Normally the data dimension would be much higher.
latent_dimension = 2
# Build model and objective function.
params_per_gp, predict, log_marginal_likelihood = \
make_gp_funs(rbf_covariance, num_cov_params=latent_dimension + 1)
total_gp_params = data_dimension * params_per_gp
data = make_pinwheel_data(5, 40)
datalen = data.shape[0]
num_latent_params = datalen * latent_dimension
def unpack_params(params):
gp_params = np.reshape(params[:total_gp_params], (data_dimension, params_per_gp))
latents = np.reshape(params[total_gp_params:], (datalen, latent_dimension))
return gp_params, latents
def objective(params):
gp_params, latents = unpack_params(params)
gp_likelihood = sum([log_marginal_likelihood(gp_params[i], latents, data[:, i])
for i in range(data_dimension)])
latent_prior_likelihood = np.sum(norm.logpdf(latents))
return -gp_likelihood - latent_prior_likelihood
# Set up figure.
fig = plt.figure(figsize=(12,8), facecolor='white')
latent_ax = fig.add_subplot(121, frameon=False)
data_ax = fig.add_subplot(122, frameon=False)
plt.show(block=False)
def callback(params):
print("Log likelihood {}".format(-objective(params)))
gp_params, latents = unpack_params(params)
data_ax.cla()
data_ax.plot(data[:, 0], data[:, 1], 'bx')
data_ax.set_xticks([])
data_ax.set_yticks([])
data_ax.set_title('Observed Data')
latent_ax.cla()
latent_ax.plot(latents[:,0], latents[:,1], 'kx')
latent_ax.set_xticks([])
latent_ax.set_yticks([])
latent_ax.set_xlim([-2, 2])
latent_ax.set_ylim([-2, 2])
latent_ax.set_title('Latent coordinates')
plt.draw()
plt.pause(1.0/60.0)
# Initialize covariance parameters
rs = npr.RandomState(1)
init_params = rs.randn(total_gp_params + num_latent_params) * 0.1
print("Optimizing covariance parameters and latent variable locations...")
minimize(value_and_grad(objective), init_params, jac=True, method='CG', callback=callback)
|
barak/autograd
|
examples/gplvm.py
|
Python
|
mit
| 3,558
|
[
"Gaussian"
] |
8e957f84c559b69d385abeac3cc7f054fc42cb4fe8bd37452fcd88f29cd589a0
|
# -*- coding: utf-8 -*-
#
# brette_gerstner_fig_2c.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Test of the adapting exponential integrate and fire model in NEST
-----------------------------------------------------------------------
This example tests the adaptive integrate and fire model (AdEx) according to
Brette and Gerstner (2005) J. Neurophysiology and
reproduces figure 2.C of the paper.
Note that Brette&Gerstner give the value for b in nA.
To be consistent with the other parameters in the equations, b must be
converted to pA (pico Ampere).
See Also
~~~~~~~~~~~
:Authors:
KEYWORDS:
"""
import nest
import nest.voltage_trace
import pylab
nest.ResetKernel()
###############################################################################
# First we make sure that the resolution of the simulation is 0.1 ms. This is
# important, since the slope of the action potential is very steep.
res = 0.1
nest.SetKernelStatus({"resolution": res})
neuron = nest.Create("aeif_cond_alpha")
###############################################################################
# a and b are parameters of the adex model. Their values come from the
# publication.
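# Worked unit conversion for b (a sketch based on the note in the module
# docstring; the nA figure is simply the pA value used below divided by 1000):
#
#     b_nA = 0.0805          # adaptation increment quoted in nA
#     b_pA = b_nA * 1000.0   # 1 nA = 1000 pA  ->  b = 80.5 pA, as set below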
nest.SetStatus(neuron, {"a": 4.0, "b": 80.5})
###############################################################################
# Next we define the stimulus protocol. There are two DC generators,
# producing stimulus currents during two time-intervals.
dc = nest.Create("dc_generator", 2)
nest.SetStatus(dc, [{"amplitude": 500.0, "start": 0.0, "stop": 200.0},
{"amplitude": 800.0, "start": 500.0, "stop": 1000.0}])
###############################################################################
# We connect the DC generators.
nest.Connect(dc, neuron, 'all_to_all')
###############################################################################
# And add a voltmeter to record the membrane potentials.
voltmeter = nest.Create("voltmeter")
###############################################################################
# We set the voltmeter to record in small intervals of 0.1 ms and connect the
# voltmeter to the neuron.
nest.SetStatus(voltmeter, {'interval': 0.1, "withgid": True, "withtime": True})
nest.Connect(voltmeter, neuron)
###############################################################################
# Finally, we simulate for 1000 ms and plot a voltage trace to produce the
# figure.
nest.Simulate(1000.0)
nest.voltage_trace.from_device(voltmeter)
pylab.axis([0, 1000, -80, -20])
|
terhorstd/nest-simulator
|
pynest/examples/brette_gerstner_fig_2c.py
|
Python
|
gpl-2.0
| 3,145
|
[
"NEURON"
] |
2307c6167aaedc9054fd3a3e089b9993144287e645fb1f8b46f99fa789ed4334
|
#!/usr/bin/env python
"""Skeleton program to demonstrate reading and analyzing simulation output.
This program reads a simulation output file 'demo.fits' and then loops over
all overlapping groups with exactly two members, with some additional cuts on
the galaxy properties, finally saving images of each pair to an output
file 'pairs.fits'.
"""
import numpy as np
import galsim
import descwl
# Initialize an array of GalSim images that we will save.
images_to_save = [ ]
output_table = None
# Load the results of a previous simulation.
results = descwl.output.Reader('LSST_i.fits').results
catalog = results.table
# Select the brightest sources from groups with exactly 2 members.
selected = results.select('grp_size==2','grp_rank==0')
# Loop over these pair groups.
for index in selected:
# Skip groups where the overlap has essentially no effect on the brighter galaxy.
if catalog['purity'][index] > 0.95: continue
# Select all (both) members of this group.
grp_id = catalog['db_id'][index]
group = results.select('grp_id==%ld' % grp_id)
if output_table is None:
output_table = catalog[group].copy()
else:
output_table.add_row(catalog[group[0]])
output_table.add_row(catalog[group[1]])
# Print the fluxes of each galaxy (in order of increasing SNR).
print 'fluxes for group id %ld are %.3f,%.3f electrons.' % (
grp_id,catalog['flux'][group[0]],catalog['flux'][group[1]])
# Create and save an image of just this group.
image = results.get_subimage(group)
images_to_save.append(image)
# Save images of each galaxy individually, using the same bounding box.
for galaxy_index in group:
galaxy = image.copy()
galaxy.array[:] = 0.
stamp = results.get_stamp(galaxy_index)
galaxy[stamp.bounds] = stamp
images_to_save.append(galaxy)
print 'Saving %d images.' % len(images_to_save)
galsim.fits.writeMulti(images_to_save,file_name = 'pairs.fits')
print "Saving pair catalog."
output_table.write("pair_table.fits")
|
jmeyers314/willitdeblend
|
skeleton.py
|
Python
|
bsd-2-clause
| 2,042
|
[
"Galaxy"
] |
d2eafdd3eb9fdfb40697434cd3e02316933747428568c2e07f2ad241ddf53954
|
"""
Python Obit FitModel class
This class contains a parameterized image model component
FitModel Members with python interfaces:
======= =================================================================
name An optional name for the object.
type Model type of the model component:
* PointMod - Point
* GaussMod - Eliptical Gaussian
* USphereMod - Uniform optically thin sphere
* Background - Background wedge
Peak Peak density
DeltaX "X" (RA) offset (deg) of center from reference position
DeltaY "Y" (Dec) offset (deg) of center from reference position
nparm Number of parameters
parms Model parameters, type dependent
ePeak Error in Peak density
eDeltaX Error in "X" (RA) offset (deg) of center from reference position
eDeltaY Error in "Y" (Dec) offset (deg) of center from reference position
eparms Error in Model parameters, type dependent
======= =================================================================
"""
# $Id$
#-----------------------------------------------------------------------
# Copyright (C) 2007,2019
# Associated Universities, Inc. Washington DC, USA.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 675 Massachusetts Ave, Cambridge,
# MA 02139, USA.
#
# Correspondence concerning this software should be addressed as follows:
# Internet email: bcotton@nrao.edu.
# Postal address: William Cotton
# National Radio Astronomy Observatory
# 520 Edgemont Road
# Charlottesville, VA 22903-2475 USA
#-----------------------------------------------------------------------
# Obit FitModel
from __future__ import absolute_import
from __future__ import print_function
import Obit, _Obit, OErr, ImageMosaic, InfoList, UV, ImageDesc, SkyGeom
# Python shadow class to ObitFitModel class
# class name in C
myClass = "ObitFitModel"
# Class data model type codes
PointMod = 0 # Point
GaussMod = 1 # Eliptical Gaussian
USphereMod = 2 # Uniform optically thin sphere
Background = 3 # Background wedge
class FitModel(Obit.FitModel):
"""
Python Obit FitModel class
This class contains a parameterized image model
FitModel Members with python interfaces:
"""
def __init__(self, name="no_name", mtype=PointMod, Peak=0.0, DeltaX=0.0, DeltaY=0.0, parms=[]) :
super(FitModel, self).__init__()
Obit.CreateFitModel(self.this, name, mtype, Peak, DeltaX, DeltaY, len(parms), parms)
self.myClass = myClass
def __del__(self, DeleteFitModel=_Obit.DeleteFitModel):
if _Obit!=None:
DeleteFitModel(self.this)
def __setattr__(self,name,value):
if name == "me" :
# Out with the old
if self.this!=None:
Obit.FitModelUnref(Obit.FitModel_Get_me(self.this))
# In with the new
Obit.FitModel_Set_me(self.this,value)
return
# members
if name=="type":
Obit.FitModelSetType(self.me,value)
return
if name=="Peak":
Obit.FitModelSetPeak(self.me,value)
return
if name=="DeltaX":
Obit.FitModelSetDeltaX(self.me,value)
return
if name=="DeltaY":
Obit.FitModelSetDeltaY(self.me,value)
return
if name=="nparm":
Obit.FitModelSetNparm(self.me,value)
return
if name=="parms":
Obit.FitModelSetNparm(self.me,len(value))
Obit.FitModelSetParms(self.me,value)
return
if name=="ePeak":
Obit.FitModelSetePeak(self.me,value)
return
if name=="eDeltaX":
Obit.FitModelSeteDeltaX(self.me,value)
return
if name=="eDeltaY;":
Obit.FitModelSeteDeltaY(self.me,value)
return
if name=="eparms":
Obit.FitModelSeteParms(self.me,value)
return
self.__dict__[name] = value
def __getattr__(self,name):
if not isinstance(self, FitModel):
return "Bogus Dudette "+str(self.__class__)
if name == "me" :
return Obit.FitModel_Get_me(self.this)
# members
if name=="type":
return Obit.FitModelGetType(self.me)
if name=="Peak":
return Obit.FitModelGetPeak(self.me)
if name=="DeltaX":
return Obit.FitModelGetDeltaX(self.me)
if name=="DeltaY":
return Obit.FitModelGetDeltaY(self.me)
if name=="nparm":
return Obit.FitModelGetNparm(self.me)
if name=="parms":
return Obit.FitModelGetParms(self.me)
if name=="ePeak":
return Obit.FitModelGetePeak(self.me)
if name=="eDeltaX":
return Obit.FitModelGeteDeltaX(self.me)
if name=="eDeltaY":
return Obit.FitModelGeteDeltaY(self.me)
if name=="eparms":
return Obit.FitModelGeteParms(self.me)
def __repr__(self):
if not isinstance(self, FitModel):
return "Bogus Dude "+str(self.__class__)
return "<C FitModel instance> " + Obit.FitModelGetName(self.me)
def cast(self, toClass):
""" Casts object pointer to specified class
        Not sure if this is actually used; it will not actually work as is.
* self = object whose cast pointer is desired
* toClass = Class string to cast to
"""
################################################################
# Get pointer with type of this class
out = self.me
out = out.replace(self.myClass, toClass)
return out
# end cast
def DeconGau (self, ImDesc):
"""
Deconvolves Beam on an image descriptor from a Gaussian model component
Returns a FitModel with the deconvolved values
        * self = object with Gaussian to be deconvolved
* ImDesc = Image Descriptor with Beam
"""
################################################################
try:
out = FitModel("Deconvolved")
out.nparm = 3
Obit.DeconGau(self.me, out.me, ImDesc.me)
#print "dconv OK"
except:
#print "dconv failed"
out = None
else:
#print "dconv failed2"
#out = None
pass
return out
# end DeconGau
def Print (self, ImDesc, corner, file=None):
"""
Prepare human readable contents
Returns string with description of model
* self = object with Model to display
* ImDesc = Image Descriptor with Beam, etc.
* corner = bottom left corner in selected region of image (0-rel)
"""
################################################################
# Start output string
id = ImDesc.Dict
modelInfo = ""
# Collect info
type = self.type
parms = self.parms
eparms = self.eparms
# Gaussian
if type==GaussMod:
# Get celestial position
xpix = corner[0]+self.DeltaX; ypix = corner[1]+self.DeltaY
pos=SkyGeom.PWorldPos (xpix, ypix, id["crval"][0], id["crval"][1], \
id["crpix"][0], id["crpix"][1], \
id["cdelt"][0], id["cdelt"][1], id["crota"][1], \
id["ctype"][0][4:8], 0.0, 0.0)
rast = ImageDesc.PRA2HMS(pos[1])
decst = ImageDesc.PDec2DMS(pos[2])
# Position errors
era = self.eDeltaX * abs(id["cdelt"][0])*3600.0
edec = self.eDeltaY * abs(id["cdelt"][1])*3600.0
modelInfo += "RA "+rast+" (%8.3g asec), pixel %8.3f (%8.3g)\n" % (era, xpix, self.eDeltaX)
modelInfo += "Dec "+decst+" (%8.3g asec), pixel %8.3f (%8.3g)\n" % (edec, ypix, self.eDeltaY)
modelInfo += "Peak Flux density %8.3g (%8.3g) %s\n" % (self.Peak, self.ePeak, id["bunit"])
# Ratio of Gaussian beams if beam in ImDesc
if (id["beamMaj"]>0.0) and (id["beamMin"]>0.0):
ratio = (parms[0] * abs(id["cdelt"][0])*parms[1] * abs(id["cdelt"][1])) / \
(id["beamMaj"]*id["beamMin"])
modelInfo += "Integrated Flux density %8.3g (%8.3g) %s\n" % \
(self.Peak*ratio, self.ePeak*ratio, "Jy")
modelInfo += "Fitted Major axis %8.3f (%8.3g) asec, %8.3f (%8.3g) pixels\n" % \
(parms[0]*abs(id["cdelt"][0])*3600.0, eparms[0]*abs(id["cdelt"][0])*3600.0, \
                          parms[0], eparms[0])
modelInfo += "Fitted Minor axis %8.3f (%8.3g) asec, %8.3f (%8.3g) pixels\n" % \
(parms[1]*abs(id["cdelt"][1])*3600.0, eparms[1]*abs(id["cdelt"][1])*3600.0, \
parms[1], eparms[1])
modelInfo += "Fitted Position angle %8.5g (%8.3g) deg\n" % \
(parms[2]*57.296, eparms[2]*57.296)
if (id["beamMaj"]>0.0) and (id["beamMin"]>0.0):
# Deconvolve
deconMod = self.DeconGau(ImDesc)
if PIsA(deconMod) and deconMod.type>0:
modelInfo += "\nDeconvolved model\n"
dparms = deconMod.parms
deparms = deconMod.eparms
modelInfo += "Deconvolved Major axis %8.3g (%8.3g) asec, %8.3f (%8.3g) pixels\n" % \
(dparms[0]*abs(id["cdelt"][0])*3600.0, deparms[0]*abs(id["cdelt"][0])*3600.0, \
                                  dparms[0], deparms[0])
modelInfo += "Deconvolved Minor axis %8.3g (%8.3g) asec, %8.3f (%8.3g) pixels\n" % \
(dparms[1]*abs(id["cdelt"][1])*3600.0, deparms[1]*abs(id["cdelt"][1])*3600.0, \
dparms[1], deparms[1])
modelInfo += "Deconvolved Position angle %8.5g (%8.3g) deg\n" % \
(dparms[2]*57.296, deparms[2]*57.296)
else:
modelInfo += "\nDeconvolution failed\n"
# end deconvolved
# end Gaussian
# done
return modelInfo
# end Print
# end class FitModel
def PIsA (inFitModel):
"""
Tells if input really a Python Obit FitModel
return True, False
* inFitModel = Python FitModel object
"""
################################################################
if not isinstance(inFitModel, FitModel):
print("Actually",inFitModel.__class__)
return False
# Checks - allow inheritence
return Obit.FitModelIsA(inFitModel.me)!=0
# end PIsA
|
kernsuite-debian/obit
|
python/FitModel.py
|
Python
|
gpl-2.0
| 11,509
|
[
"Gaussian"
] |
3c2049c6e4165265c510182f5f6f7e22f4c03f4a07403cee97faab28818cbd8c
|
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import sys
import setuptools
from distutils.command.clean import clean as _clean
from distutils.command.build import build as _build
from setuptools.command.sdist import sdist as _sdist
from setuptools.command.build_ext import build_ext as _build_ext
try:
import multiprocessing
assert multiprocessing
except ImportError:
pass
def strip_comments(l):
return l.split('#', 1)[0].strip()
def reqs(filename):
with open(os.path.join(os.getcwd(),
'requirements',
filename)) as fp:
return filter(None, [strip_comments(l)
for l in fp.readlines()])
setup_ext = {}
if os.path.isfile('gulpfile.js'):
    # If gulpfile.js exists, build (minify) the front-end assets.
def gulp_build(done=[]):
if not done:
if os.system('npm install '
'--disturl=https://npm.taobao.org/dist '
'--registry=https://registry.npm.taobao.org'):
sys.exit(1)
if os.system('bower install'):
sys.exit(1)
if os.system('gulp build'):
sys.exit(1)
done.append(1)
def gulp_clean(done=[]):
if not done:
if os.system('npm install '
'--disturl=https://npm.taobao.org/dist '
'--registry=https://registry.npm.taobao.org'):
sys.exit(1)
if os.system('gulp clean'):
sys.exit(1)
done.append(1)
class build(_build):
sub_commands = _build.sub_commands[:]
        # Force build_ext to run even for this pure-Python package (its default
        # predicate would return False), so the front-end assets get built too.
for ix, (name, checkfunc) in enumerate(sub_commands):
if name == 'build_ext':
sub_commands[ix] = (name, lambda self: True)
class build_ext(_build_ext):
def run(self):
gulp_build()
_build_ext.run(self)
class sdist(_sdist):
def run(self):
gulp_build()
_sdist.run(self)
class clean(_clean):
def run(self):
_clean.run(self)
gulp_clean()
setup_ext = {'cmdclass': {'sdist': sdist,
'clean': clean,
'build': build,
'build_ext': build_ext}}
setup_params = dict(
name="qsapp-suibe",
url="http://wiki.yimiqisan.com/",
version='1.0',
author="qisan",
author_email="qisanstudio@gmail.com",
packages=setuptools.find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
install_requires=reqs('install.txt'))
setup_params.update(setup_ext)
if __name__ == '__main__':
setuptools.setup(**setup_params)
|
qisanstudio/qsapp-suibe
|
setup.py
|
Python
|
mit
| 2,804
|
[
"GULP"
] |
f71536173f4ed45bee11a53e914fbd0f5f20eea68790a62801f456e9834a2c2e
|
#!/usr/bin/env python
import rospy
from pyassimp import pyassimp
rospy.init_node("stl")
file_name = rospy.get_param("~file_name")
if False:
import vtk
reader = vtk.vtkSTLReader()
    reader.SetFileName(file_name)
reader.Update()
polydata = reader.GetOutput()
print polydata.GetPoints()
print polydata.GetPolys()
scene = pyassimp.load(file_name)
print scene
mesh = scene.meshes[0]
print dir(mesh)
print mesh.faces[0].indices
# print mesh.vertices
# print mesh.
# TODO(lucasw) convert the vertices and indices to a shape_msg/Mesh
|
lucasw/simple_sim_ros
|
bullet_server/scripts/mesh.py
|
Python
|
gpl-3.0
| 560
|
[
"VTK"
] |
1bdbcfe7c7fada024275ccf13af5e78153d60264e9ba86962b08785be227976b
|
"""
Acceptance tests for Studio related to the container page.
The container page is used both for displaying units, and
for displaying containers within units.
"""
import datetime
import ddt
from nose.plugins.attrib import attr
from base_studio_test import ContainerBase
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.create_mode import ModeCreationPage
from common.test.acceptance.pages.lms.staff_view import StaffCoursewarePage
from common.test.acceptance.pages.studio.xblock_editor import XBlockEditorView, XBlockVisibilityEditorView
from common.test.acceptance.pages.studio.container import ContainerPage
from common.test.acceptance.pages.studio.html_component_editor import HtmlXBlockEditorView
from common.test.acceptance.pages.studio.move_xblock import MoveModalView
from common.test.acceptance.pages.studio.utils import add_discussion
from common.test.acceptance.tests.helpers import create_user_partition_json
from xmodule.partitions.partitions import ENROLLMENT_TRACK_PARTITION_ID, MINIMUM_STATIC_PARTITION_ID, Group
class NestedVerticalTest(ContainerBase):
def populate_course_fixture(self, course_fixture):
"""
Sets up a course structure with nested verticals.
"""
self.container_title = ""
self.group_a = "Group A"
self.group_b = "Group B"
self.group_empty = "Group Empty"
self.group_a_item_1 = "Group A Item 1"
self.group_a_item_2 = "Group A Item 2"
self.group_b_item_1 = "Group B Item 1"
self.group_b_item_2 = "Group B Item 2"
self.group_a_handle = 0
self.group_a_item_1_handle = 1
self.group_a_item_2_handle = 2
self.group_empty_handle = 3
self.group_b_handle = 4
self.group_b_item_1_handle = 5
self.group_b_item_2_handle = 6
self.group_a_item_1_action_index = 0
self.group_a_item_2_action_index = 1
self.duplicate_label = "Duplicate of '{0}'"
self.discussion_label = "Discussion"
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('vertical', 'Test Container').add_children(
XBlockFixtureDesc('vertical', 'Group A').add_children(
XBlockFixtureDesc('html', self.group_a_item_1),
XBlockFixtureDesc('html', self.group_a_item_2)
),
XBlockFixtureDesc('vertical', 'Group Empty'),
XBlockFixtureDesc('vertical', 'Group B').add_children(
XBlockFixtureDesc('html', self.group_b_item_1),
XBlockFixtureDesc('html', self.group_b_item_2)
)
)
)
)
)
)
@attr(shard=1)
class AddComponentTest(NestedVerticalTest):
"""
Tests of adding a component to the container page.
"""
def add_and_verify(self, menu_index, expected_ordering):
self.do_action_and_verify(
            lambda container: add_discussion(container, menu_index),
expected_ordering
)
def test_add_component_in_group(self):
group_b_menu = 2
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_a_item_1, self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2, self.discussion_label]},
{self.group_empty: []}]
self.add_and_verify(group_b_menu, expected_ordering)
def test_add_component_in_empty_group(self):
group_empty_menu = 1
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_a_item_1, self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: [self.discussion_label]}]
self.add_and_verify(group_empty_menu, expected_ordering)
def test_add_component_in_container(self):
container_menu = 3
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b, self.discussion_label]},
{self.group_a: [self.group_a_item_1, self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: []}]
self.add_and_verify(container_menu, expected_ordering)
@attr(shard=1)
class DuplicateComponentTest(NestedVerticalTest):
"""
Tests of duplicating a component on the container page.
"""
def duplicate_and_verify(self, source_index, expected_ordering):
self.do_action_and_verify(
            lambda container: container.duplicate(source_index),
expected_ordering
)
def test_duplicate_first_in_group(self):
duplicate_label = self.duplicate_label.format(self.group_a_item_1)
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_a_item_1, duplicate_label, self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: []}]
self.duplicate_and_verify(self.group_a_item_1_action_index, expected_ordering)
def test_duplicate_second_in_group(self):
duplicate_label = self.duplicate_label.format(self.group_a_item_2)
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_a_item_1, self.group_a_item_2, duplicate_label]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: []}]
self.duplicate_and_verify(self.group_a_item_2_action_index, expected_ordering)
def test_duplicate_the_duplicate(self):
first_duplicate_label = self.duplicate_label.format(self.group_a_item_1)
second_duplicate_label = self.duplicate_label.format(first_duplicate_label)
expected_ordering = [
{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_a_item_1, first_duplicate_label, second_duplicate_label, self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: []}
]
def duplicate_twice(container):
container.duplicate(self.group_a_item_1_action_index)
container.duplicate(self.group_a_item_1_action_index + 1)
self.do_action_and_verify(duplicate_twice, expected_ordering)
@attr(shard=1)
class DeleteComponentTest(NestedVerticalTest):
"""
Tests of deleting a component from the container page.
"""
def delete_and_verify(self, source_index, expected_ordering):
self.do_action_and_verify(
            lambda container: container.delete(source_index),
expected_ordering
)
def test_delete_first_in_group(self):
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: []}]
# Group A itself has a delete icon now, so item_1 is index 1 instead of 0.
group_a_item_1_delete_index = 1
self.delete_and_verify(group_a_item_1_delete_index, expected_ordering)
@attr(shard=1)
class EditContainerTest(NestedVerticalTest):
"""
Tests of editing a container.
"""
def modify_display_name_and_verify(self, component):
"""
Helper method for changing a display name.
"""
modified_name = 'modified'
self.assertNotEqual(component.name, modified_name)
component.edit()
component_editor = XBlockEditorView(self.browser, component.locator)
component_editor.set_field_value_and_save('Display Name', modified_name)
self.assertEqual(component.name, modified_name)
def test_edit_container_on_unit_page(self):
"""
Test the "edit" button on a container appearing on the unit page.
"""
unit = self.go_to_unit_page()
component = unit.xblocks[1]
self.modify_display_name_and_verify(component)
def test_edit_container_on_container_page(self):
"""
Test the "edit" button on a container appearing on the container page.
"""
container = self.go_to_nested_container_page()
self.modify_display_name_and_verify(container)
def test_edit_raw_html(self):
"""
Test the raw html editing functionality.
"""
modified_content = "<p>modified content</p>"
#navigate to and open the component for editing
unit = self.go_to_unit_page()
container = unit.xblocks[1].go_to_container()
component = container.xblocks[1].children[0]
component.edit()
html_editor = HtmlXBlockEditorView(self.browser, component.locator)
html_editor.set_content_and_save(modified_content, raw=True)
#note we're expecting the <p> tags to have been removed
self.assertEqual(component.student_content, "modified content")
class BaseGroupConfigurationsTest(ContainerBase):
ALL_LEARNERS_AND_STAFF = XBlockVisibilityEditorView.ALL_LEARNERS_AND_STAFF
CHOOSE_ONE = "Select a group type"
CONTENT_GROUP_PARTITION = XBlockVisibilityEditorView.CONTENT_GROUP_PARTITION
ENROLLMENT_TRACK_PARTITION = XBlockVisibilityEditorView.ENROLLMENT_TRACK_PARTITION
MISSING_GROUP_LABEL = 'Deleted Group\nThis group no longer exists. Choose another group or remove the access restriction.'
VALIDATION_ERROR_LABEL = 'This component has validation issues.'
VALIDATION_ERROR_MESSAGE = "Error:\nThis component's access settings refer to deleted or invalid groups."
GROUP_VISIBILITY_MESSAGE = 'Access to some content in this unit is restricted to specific groups of learners.'
MODAL_NOT_RESTRICTED_MESSAGE = "Access is not restricted"
def setUp(self):
super(BaseGroupConfigurationsTest, self).setUp()
# Set up a cohort-schemed user partition
self.id_base = MINIMUM_STATIC_PARTITION_ID
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
self.id_base,
self.CONTENT_GROUP_PARTITION,
'Content Group Partition',
[
Group(self.id_base + 1, 'Dogs'),
Group(self.id_base + 2, 'Cats')
],
scheme="cohort"
)
],
},
})
self.container_page = self.go_to_unit_page()
self.html_component = self.container_page.xblocks[1]
def populate_course_fixture(self, course_fixture):
"""
        Populate a simple course with a section, subsection, unit, and HTML component.
"""
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('html', 'Html Component')
)
)
)
)
def edit_component_visibility(self, component):
"""
Edit the visibility of an xblock on the container page and returns an XBlockVisibilityEditorView.
"""
component.edit_visibility()
return XBlockVisibilityEditorView(self.browser, component.locator)
def edit_unit_visibility(self, unit):
"""
Edit the visibility of a unit on the container page and returns an XBlockVisibilityEditorView.
"""
unit.edit_visibility()
return XBlockVisibilityEditorView(self.browser, unit.locator)
def verify_current_groups_message(self, visibility_editor, expected_current_groups):
"""
Check that the current visibility is displayed at the top of the dialog.
"""
if expected_current_groups == self.ALL_LEARNERS_AND_STAFF:
self.assertEqual("Access is not restricted", visibility_editor.current_groups_message)
else:
self.assertEqual(
"Access is restricted to: {groups}".format(groups=expected_current_groups),
visibility_editor.current_groups_message
)
def verify_selected_partition_scheme(self, visibility_editor, expected_scheme):
"""
Check that the expected partition scheme is selected.
"""
self.assertItemsEqual(expected_scheme, visibility_editor.selected_partition_scheme)
def verify_selected_groups(self, visibility_editor, expected_groups):
"""
Check the expected partition groups.
"""
self.assertItemsEqual(expected_groups, [group.text for group in visibility_editor.selected_groups])
def select_and_verify_saved(self, component, partition_label, groups=[]):
"""
Edit the visibility of an xblock on the container page and
verify that the edit persists. Note that `groups`
are labels which should be clicked, but not necessarily checked.
"""
# Make initial edit(s) and save
visibility_editor = self.edit_component_visibility(component)
visibility_editor.select_groups_in_partition_scheme(partition_label, groups)
# Re-open the modal and inspect its selected inputs. If no groups were selected,
# "All Learners" should be selected partitions scheme, and we show "Select a group type" in the select.
if not groups:
partition_label = self.CHOOSE_ONE
visibility_editor = self.edit_component_visibility(component)
self.verify_selected_partition_scheme(visibility_editor, partition_label)
self.verify_selected_groups(visibility_editor, groups)
visibility_editor.save()
def select_and_verify_unit_group_access(self, unit, partition_label, groups=[]):
"""
Edit the visibility of an xblock on the unit page and
verify that the edit persists. Note that `groups`
are labels which should be clicked, but are not necessarily checked.
"""
unit_access_editor = self.edit_unit_visibility(unit)
unit_access_editor.select_groups_in_partition_scheme(partition_label, groups)
if not groups:
partition_label = self.CHOOSE_ONE
unit_access_editor = self.edit_unit_visibility(unit)
self.verify_selected_partition_scheme(unit_access_editor, partition_label)
self.verify_selected_groups(unit_access_editor, groups)
unit_access_editor.save()
def verify_component_validation_error(self, component):
"""
Verify that we see validation errors for the given component.
"""
self.assertTrue(component.has_validation_error)
self.assertEqual(component.validation_error_text, self.VALIDATION_ERROR_LABEL)
self.assertEqual([self.VALIDATION_ERROR_MESSAGE], component.validation_error_messages)
def verify_visibility_set(self, component, is_set):
"""
Verify that the container page shows that component visibility
settings have been edited if `is_set` is True; otherwise
verify that the container page shows no such information.
"""
if is_set:
self.assertIn(self.GROUP_VISIBILITY_MESSAGE, self.container_page.sidebar_visibility_message)
self.assertTrue(component.has_group_visibility_set)
else:
self.assertNotIn(self.GROUP_VISIBILITY_MESSAGE, self.container_page.sidebar_visibility_message)
self.assertFalse(component.has_group_visibility_set)
def verify_unit_visibility_set(self, unit, set_groups=[]):
"""
Verify that the container visibility modal shows that unit visibility
settings have been edited if there are `set_groups`. Otherwise verify
that the modal shows no such information.
"""
unit_access_editor = self.edit_unit_visibility(unit)
if set_groups:
self.assertIn(", ".join(set_groups), unit_access_editor.current_groups_message)
else:
self.assertEqual(self.MODAL_NOT_RESTRICTED_MESSAGE, unit_access_editor.current_groups_message)
unit_access_editor.cancel()
def update_component(self, component, metadata):
"""
Update a component's metadata and refresh the page.
"""
self.course_fixture._update_xblock(component.locator, {'metadata': metadata})
self.browser.refresh()
self.container_page.wait_for_page()
def remove_missing_groups(self, visibility_editor, component):
"""
Deselect the missing groups for a component. After save,
verify that there are no missing group messages in the modal
and that there is no validation error on the component.
"""
for option in visibility_editor.all_group_options:
if option.text == self.MISSING_GROUP_LABEL:
option.click()
visibility_editor.save()
visibility_editor = self.edit_component_visibility(component)
self.assertNotIn(self.MISSING_GROUP_LABEL, [item.text for item in visibility_editor.all_group_options])
visibility_editor.cancel()
self.assertFalse(component.has_validation_error)
class UnitAccessContainerTest(BaseGroupConfigurationsTest):
GROUP_RESTRICTED_MESSAGE = 'Access to this unit is restricted to: Dogs'
def _toggle_container_unit_access(self, group_ids, unit):
"""
Toggle the unit level access on the course outline page
"""
unit.toggle_unit_access('Content Groups', group_ids)
def _verify_container_unit_access_message(self, group_ids, expected_message):
"""
Check that the container page displays the correct unit
access message.
"""
self.outline.visit()
self.outline.expand_all_subsections()
unit = self.outline.section_at(0).subsection_at(0).unit_at(0)
self._toggle_container_unit_access(group_ids, unit)
container_page = self.go_to_unit_page()
self.assertEqual(str(container_page.get_xblock_access_message()), expected_message)
def test_default_selection(self):
"""
Tests that no message is displayed when there are no
restrictions on the unit or components.
"""
self._verify_container_unit_access_message([], '')
def test_restricted_components_message(self):
"""
Test that the proper message is displayed when access to
some components is restricted.
"""
container_page = self.go_to_unit_page()
html_component = container_page.xblocks[1]
# Initially set visibility to Dog group.
self.update_component(
html_component,
{'group_access': {self.id_base: [self.id_base + 1]}}
)
self._verify_container_unit_access_message([], self.GROUP_VISIBILITY_MESSAGE)
def test_restricted_access_message(self):
"""
Test that the proper message is displayed when access to the
unit is restricted to a particular group.
"""
self._verify_container_unit_access_message([self.id_base + 1], self.GROUP_RESTRICTED_MESSAGE)
@attr(shard=3)
class ContentGroupVisibilityModalTest(BaseGroupConfigurationsTest):
"""
Tests of the visibility settings modal for components on the unit
page (content groups).
"""
def test_default_selection(self):
"""
Scenario: The component visibility modal selects visible to all by default.
Given I have a unit with one component
When I go to the container page for that unit
And I open the visibility editor modal for that unit's component
Then the default visibility selection should be 'All Students and Staff'
And the container page should not display the content visibility warning
"""
visibility_dialog = self.edit_component_visibility(self.html_component)
self.verify_current_groups_message(visibility_dialog, self.ALL_LEARNERS_AND_STAFF)
self.verify_selected_partition_scheme(visibility_dialog, self.CHOOSE_ONE)
visibility_dialog.cancel()
self.verify_visibility_set(self.html_component, False)
def test_reset_to_all_students_and_staff(self):
"""
Scenario: The component visibility modal can be set to be visible to all students and staff.
Given I have a unit with one component
When I go to the container page for that unit
Then the container page should not display the content visibility warning by default.
If I then restrict access and save, and then I open the visibility editor modal for that unit's component
And I select 'All Students and Staff'
And I save the modal
Then the visibility selection should be 'All Students and Staff'
And the container page should still not display the content visibility warning
"""
self.select_and_verify_saved(self.html_component, self.CONTENT_GROUP_PARTITION, ['Dogs'])
self.select_and_verify_saved(self.html_component, self.ALL_LEARNERS_AND_STAFF)
self.verify_visibility_set(self.html_component, False)
def test_reset_unit_access_to_all_students_and_staff(self):
"""
Scenario: The unit visibility modal can be set to be visible to all students and staff.
Given I have a unit
When I go to the container page for that unit
And I open the visibility editor modal for that unit
And I select 'Dogs'
And I save the modal
Then I re-open the modal, the unit access modal should display the content visibility settings
Then after re-opening the modal again
And I select 'All Learners and Staff'
And I save the modal
And I re-open the modal, the unit access modal should display that no content is restricted
"""
self.select_and_verify_unit_group_access(self.container_page, self.CONTENT_GROUP_PARTITION, ['Dogs'])
self.verify_unit_visibility_set(self.container_page, set_groups=["Dogs"])
self.select_and_verify_unit_group_access(self.container_page, self.ALL_LEARNERS_AND_STAFF)
self.verify_unit_visibility_set(self.container_page)
def test_select_single_content_group(self):
"""
Scenario: The component visibility modal can be set to be visible to one content group.
Given I have a unit with one component
When I go to the container page for that unit
And I open the visibility editor modal for that unit's component
And I select 'Dogs'
And I save the modal
Then the visibility selection should be 'Dogs' and 'Specific Content Groups'
"""
self.select_and_verify_saved(self.html_component, self.CONTENT_GROUP_PARTITION, ['Dogs'])
def test_select_multiple_content_groups(self):
"""
Scenario: The component visibility modal can be set to be visible to multiple content groups.
Given I have a unit with one component
When I go to the container page for that unit
And I open the visibility editor modal for that unit's component
And I select 'Dogs' and 'Cats'
And I save the modal
Then the visibility selection should be 'Dogs', 'Cats', and 'Specific Content Groups'
"""
self.select_and_verify_saved(self.html_component, self.CONTENT_GROUP_PARTITION, ['Dogs', 'Cats'])
def test_select_zero_content_groups(self):
"""
Scenario: The component visibility modal can not be set to be visible to 'Specific Content Groups' without
selecting those specific groups.
Given I have a unit with one component
When I go to the container page for that unit
And I open the visibility editor modal for that unit's component
And I select 'Specific Content Groups'
And I save the modal
Then the visibility selection should be 'All Students and Staff'
And the container page should not display the content visibility warning
"""
self.select_and_verify_saved(
self.html_component, self.CONTENT_GROUP_PARTITION
)
self.verify_visibility_set(self.html_component, False)
def test_missing_groups(self):
"""
Scenario: The component visibility modal shows a validation error when visibility is set to multiple unknown
group ids.
Given I have a unit with one component
And that component's group access specifies multiple invalid group ids
When I go to the container page for that unit
Then I should see a validation error message on that unit's component
And I open the visibility editor modal for that unit's component
Then I should see that I have selected multiple deleted groups
And the container page should display the content visibility warning
And I de-select the missing groups
And I save the modal
Then the visibility selection should be 'All Students and Staff'
And I should not see any validation errors on the component
And the container page should not display the content visibility warning
"""
self.update_component(
self.html_component,
{'group_access': {self.id_base: [self.id_base + 3, self.id_base + 4]}}
)
self._verify_and_remove_missing_content_groups(
"Deleted Group, Deleted Group",
[self.MISSING_GROUP_LABEL] * 2
)
self.verify_visibility_set(self.html_component, False)
def test_found_and_missing_groups(self):
"""
Scenario: The component visibility modal shows a validation error when visibility is set to multiple unknown
group ids and multiple known group ids.
Given I have a unit with one component
And that component's group access specifies multiple invalid and valid group ids
When I go to the container page for that unit
Then I should see a validation error message on that unit's component
And I open the visibility editor modal for that unit's component
Then I should see that I have selected multiple deleted groups
And then if I de-select the missing groups
And I save the modal
Then the visibility selection should be the names of the valid groups.
And I should not see any validation errors on the component
"""
self.update_component(
self.html_component,
{'group_access': {self.id_base: [self.id_base + 1, self.id_base + 2, self.id_base + 3, self.id_base + 4]}}
)
self._verify_and_remove_missing_content_groups(
'Dogs, Cats, Deleted Group, Deleted Group',
['Dogs', 'Cats'] + [self.MISSING_GROUP_LABEL] * 2
)
visibility_editor = self.edit_component_visibility(self.html_component)
self.verify_selected_partition_scheme(visibility_editor, self.CONTENT_GROUP_PARTITION)
expected_groups = ['Dogs', 'Cats']
self.verify_current_groups_message(visibility_editor, ", ".join(expected_groups))
self.verify_selected_groups(visibility_editor, expected_groups)
def _verify_and_remove_missing_content_groups(self, current_groups_message, all_group_labels):
self.verify_component_validation_error(self.html_component)
visibility_editor = self.edit_component_visibility(self.html_component)
self.verify_selected_partition_scheme(visibility_editor, self.CONTENT_GROUP_PARTITION)
self.verify_current_groups_message(visibility_editor, current_groups_message)
self.verify_selected_groups(visibility_editor, all_group_labels)
self.remove_missing_groups(visibility_editor, self.html_component)
@attr(shard=3)
class EnrollmentTrackVisibilityModalTest(BaseGroupConfigurationsTest):
"""
Tests of the visibility settings modal for components on the unit
page (enrollment tracks).
"""
AUDIT_TRACK = "Audit Track"
VERIFIED_TRACK = "Verified Track"
def setUp(self):
super(EnrollmentTrackVisibilityModalTest, self).setUp()
# Add an audit mode to the course
ModeCreationPage(self.browser, self.course_id, mode_slug=u'audit', mode_display_name=self.AUDIT_TRACK).visit()
# Add a verified mode to the course
ModeCreationPage(
self.browser, self.course_id, mode_slug=u'verified',
mode_display_name=self.VERIFIED_TRACK, min_price=10
).visit()
self.container_page = self.go_to_unit_page()
self.html_component = self.container_page.xblocks[1]
# Initially set visibility to Verified track.
self.update_component(
self.html_component,
{'group_access': {ENROLLMENT_TRACK_PARTITION_ID: [2]}} # "2" is Verified
)
def verify_component_group_visibility_messsage(self, component, expected_groups):
"""
Verifies that the group visibility message below the component display name is correct.
"""
if not expected_groups:
self.assertIsNone(component.get_partition_group_message)
else:
self.assertEqual("Access restricted to: " + expected_groups, component.get_partition_group_message)
def test_setting_enrollment_tracks(self):
"""
Test that enrollment track groups can be selected.
"""
# Verify that the "Verified" Group is shown on the unit page (under the unit display name).
self.verify_component_group_visibility_messsage(self.html_component, "Verified Track")
# Open dialog with "Verified" already selected.
visibility_editor = self.edit_component_visibility(self.html_component)
self.verify_current_groups_message(visibility_editor, self.VERIFIED_TRACK)
self.verify_selected_partition_scheme(
visibility_editor,
self.ENROLLMENT_TRACK_PARTITION
)
self.verify_selected_groups(visibility_editor, [self.VERIFIED_TRACK])
visibility_editor.cancel()
# Select "All Learners and Staff". The helper method saves the change,
# then reopens the dialog to verify that it was persisted.
self.select_and_verify_saved(self.html_component, self.ALL_LEARNERS_AND_STAFF)
self.verify_component_group_visibility_messsage(self.html_component, None)
# Select "Audit" enrollment track. The helper method saves the change,
# then reopens the dialog to verify that it was persisted.
self.select_and_verify_saved(self.html_component, self.ENROLLMENT_TRACK_PARTITION, [self.AUDIT_TRACK])
self.verify_component_group_visibility_messsage(self.html_component, "Audit Track")
@attr(shard=1)
class UnitPublishingTest(ContainerBase):
"""
Tests of the publishing control and related widgets on the Unit page.
"""
PUBLISHED_STATUS = "Publishing Status\nPublished (not yet released)"
PUBLISHED_LIVE_STATUS = "Publishing Status\nPublished and Live"
DRAFT_STATUS = "Publishing Status\nDraft (Unpublished changes)"
LOCKED_STATUS = "Publishing Status\nVisible to Staff Only"
RELEASE_TITLE_RELEASED = "RELEASED:"
RELEASE_TITLE_RELEASE = "RELEASE:"
LAST_PUBLISHED = 'Last published'
LAST_SAVED = 'Draft saved on'
def populate_course_fixture(self, course_fixture):
"""
Sets up a course structure with a unit and a single HTML child.
"""
self.html_content = '<p><strong>Body of HTML Unit.</strong></p>'
self.courseware = CoursewarePage(self.browser, self.course_id)
past_start_date = datetime.datetime(1974, 6, 22)
self.past_start_date_text = "Jun 22, 1974 at 00:00 UTC"
future_start_date = datetime.datetime(2100, 9, 13)
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('html', 'Test html', data=self.html_content)
)
)
),
XBlockFixtureDesc(
'chapter',
'Unlocked Section',
metadata={'start': past_start_date.isoformat()}
).add_children(
XBlockFixtureDesc('sequential', 'Unlocked Subsection').add_children(
XBlockFixtureDesc('vertical', 'Unlocked Unit').add_children(
XBlockFixtureDesc('problem', '<problem></problem>', data=self.html_content)
)
)
),
XBlockFixtureDesc('chapter', 'Section With Locked Unit').add_children(
XBlockFixtureDesc(
'sequential',
'Subsection With Locked Unit',
metadata={'start': past_start_date.isoformat()}
).add_children(
XBlockFixtureDesc(
'vertical',
'Locked Unit',
metadata={'visible_to_staff_only': True}
).add_children(
XBlockFixtureDesc('discussion', '', data=self.html_content)
)
)
),
XBlockFixtureDesc(
'chapter',
'Unreleased Section',
metadata={'start': future_start_date.isoformat()}
).add_children(
XBlockFixtureDesc('sequential', 'Unreleased Subsection').add_children(
XBlockFixtureDesc('vertical', 'Unreleased Unit')
)
)
)
def test_publishing(self):
"""
Scenario: The publish title changes based on whether or not draft content exists
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
Then the title in the Publish information box is "Published and Live"
And the Publish button is disabled
And the last published text contains "Last published"
And the last saved text contains "Last published"
And when I add a component to the unit
Then the title in the Publish information box is "Draft (Unpublished changes)"
And the last saved text contains "Draft saved on"
And the Publish button is enabled
And when I click the Publish button
Then the title in the Publish information box is "Published and Live"
And the last published text contains "Last published"
And the last saved text contains "Last published"
"""
unit = self.go_to_unit_page()
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
# Start date set in course fixture to 1970.
self._verify_release_date_info(
unit, self.RELEASE_TITLE_RELEASED, 'Jan 01, 1970 at 00:00 UTC\nwith Section "Test Section"'
)
self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_PUBLISHED)
# Should not be able to click on Publish action -- but I don't know how to test that it is not clickable.
# TODO: continue discussion with Muhammad and Jay about this.
# Add a component to the page so it will have unpublished changes.
add_discussion(unit)
unit.verify_publish_title(self.DRAFT_STATUS)
self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_SAVED)
unit.publish_action.click()
unit.wait_for_ajax()
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_PUBLISHED)
def test_discard_changes(self):
"""
Scenario: The publish title changes after "Discard Changes" is clicked
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
Then the Discard Changes button is disabled
And I add a component to the unit
Then the title in the Publish information box is "Draft (Unpublished changes)"
And the Discard Changes button is enabled
And when I click the Discard Changes button
Then the title in the Publish information box is "Published and Live"
"""
unit = self.go_to_unit_page()
add_discussion(unit)
unit.verify_publish_title(self.DRAFT_STATUS)
unit.discard_changes()
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
def test_view_live_no_changes(self):
"""
Scenario: "View Live" shows published content in LMS
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
Then the View Live button is enabled
And when I click on the View Live button
Then I see the published content in LMS
"""
unit = self.go_to_unit_page()
self._view_published_version(unit)
self._verify_components_visible(['html'])
def test_view_live_changes(self):
"""
Scenario: "View Live" does not show draft content in LMS
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
And when I add a component to the unit
And when I click on the View Live button
Then I see the published content in LMS
And I do not see the unpublished component
"""
unit = self.go_to_unit_page()
add_discussion(unit)
self._view_published_version(unit)
self._verify_components_visible(['html'])
self.assertEqual(self.html_content, self.courseware.xblock_component_html_content(0))
def test_view_live_after_publish(self):
"""
Scenario: "View Live" shows newly published content
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
And when I add a component to the unit
And when I click the Publish button
And when I click on the View Live button
Then I see the newly published component
"""
unit = self.go_to_unit_page()
add_discussion(unit)
unit.publish_action.click()
self._view_published_version(unit)
self._verify_components_visible(['html', 'discussion'])
def test_initially_unlocked_visible_to_students(self):
"""
Scenario: An unlocked unit with release date in the past is visible to students
Given I have a published unlocked unit with release date in the past
When I go to the unit page in Studio
Then the unit has a warning that it is visible to students
And it is marked as "RELEASED" with release date in the past visible
And when I click on the View Live Button
And when I view the course as a student
Then I see the content in the unit
"""
unit = self.go_to_unit_page("Unlocked Section", "Unlocked Subsection", "Unlocked Unit")
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self.assertTrue(unit.currently_visible_to_students)
self._verify_release_date_info(
unit, self.RELEASE_TITLE_RELEASED, self.past_start_date_text + '\n' + 'with Section "Unlocked Section"'
)
self._view_published_version(unit)
self._verify_student_view_visible(['problem'])
def test_locked_visible_to_staff_only(self):
"""
Scenario: After locking a unit with release date in the past, it is only visible to staff
Given I have a published unlocked unit with release date in the past
When I go to the unit page in Studio
And when I select "Hide from students"
Then the unit does not have a warning that it is visible to students
And the unit does not display inherited staff lock
And when I click on the View Live Button
Then I see the content in the unit when logged in as staff
And when I view the course as a student
Then I do not see any content in the unit
"""
unit = self.go_to_unit_page("Unlocked Section", "Unlocked Subsection", "Unlocked Unit")
checked = unit.toggle_staff_lock()
self.assertTrue(checked)
self.assertFalse(unit.currently_visible_to_students)
self.assertFalse(unit.shows_inherited_staff_lock())
unit.verify_publish_title(self.LOCKED_STATUS)
self._view_published_version(unit)
# Will initially be in staff view, locked component should be visible.
self._verify_components_visible(['problem'])
# Switch to student view and verify not visible
self._verify_student_view_locked()
def test_initially_locked_not_visible_to_students(self):
"""
Scenario: A locked unit with release date in the past is not visible to students
Given I have a published locked unit with release date in the past
When I go to the unit page in Studio
Then the unit does not have a warning that it is visible to students
And it is marked as "RELEASE" with release date in the past visible
And when I click on the View Live Button
And when I view the course as a student
Then I do not see any content in the unit
"""
unit = self.go_to_unit_page("Section With Locked Unit", "Subsection With Locked Unit", "Locked Unit")
unit.verify_publish_title(self.LOCKED_STATUS)
self.assertFalse(unit.currently_visible_to_students)
self._verify_release_date_info(
unit, self.RELEASE_TITLE_RELEASE,
self.past_start_date_text + '\n' + 'with Subsection "Subsection With Locked Unit"'
)
self._view_published_version(unit)
self._verify_student_view_locked()
def test_unlocked_visible_to_all(self):
"""
Scenario: After unlocking a unit with release date in the past, it is visible to both students and staff
Given I have a published unlocked unit with release date in the past
When I go to the unit page in Studio
And when I deselect "Hide from students"
Then the unit does have a warning that it is visible to students
And when I click on the View Live Button
Then I see the content in the unit when logged in as staff
And when I view the course as a student
Then I see the content in the unit
"""
unit = self.go_to_unit_page("Section With Locked Unit", "Subsection With Locked Unit", "Locked Unit")
checked = unit.toggle_staff_lock()
self.assertFalse(checked)
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self.assertTrue(unit.currently_visible_to_students)
self._view_published_version(unit)
# Will initially be in staff view, components always visible.
self._verify_components_visible(['discussion'])
# Switch to student view and verify visible.
self._verify_student_view_visible(['discussion'])
def test_explicit_lock_overrides_implicit_subsection_lock_information(self):
"""
Scenario: A unit's explicit staff lock hides its inherited subsection staff lock information
Given I have a course with sections, subsections, and units
And I have enabled explicit staff lock on a subsection
When I visit the unit page
Then the unit page shows its inherited staff lock
And I enable explicit staff locking
Then the unit page does not show its inherited staff lock
And when I disable explicit staff locking
Then the unit page now shows its inherited staff lock
"""
self.outline.visit()
self.outline.expand_all_subsections()
subsection = self.outline.section_at(0).subsection_at(0)
unit = subsection.unit_at(0)
subsection.set_staff_lock(True)
unit_page = unit.go_to()
self._verify_explicit_lock_overrides_implicit_lock_information(unit_page)
def test_explicit_lock_overrides_implicit_section_lock_information(self):
"""
Scenario: A unit's explicit staff lock hides its inherited subsection staff lock information
Given I have a course with sections, subsections, and units
And I have enabled explicit staff lock on a section
When I visit the unit page
Then the unit page shows its inherited staff lock
And I enable explicit staff locking
Then the unit page does not show its inherited staff lock
And when I disable explicit staff locking
Then the unit page now shows its inherited staff lock
"""
self.outline.visit()
self.outline.expand_all_subsections()
section = self.outline.section_at(0)
unit = section.subsection_at(0).unit_at(0)
section.set_staff_lock(True)
unit_page = unit.go_to()
self._verify_explicit_lock_overrides_implicit_lock_information(unit_page)
def test_published_unit_with_draft_child(self):
"""
Scenario: A published unit with a draft child can be published
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
And edit the content of the only component
Then the content changes
And the title in the Publish information box is "Draft (Unpublished changes)"
And when I click the Publish button
Then the title in the Publish information box is "Published and Live"
And when I click the View Live button
Then I see the changed content in LMS
"""
modified_content = 'modified content'
unit = self.go_to_unit_page()
component = unit.xblocks[1]
component.edit()
HtmlXBlockEditorView(self.browser, component.locator).set_content_and_save(modified_content)
self.assertEqual(component.student_content, modified_content)
unit.verify_publish_title(self.DRAFT_STATUS)
unit.publish_action.click()
unit.wait_for_ajax()
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self._view_published_version(unit)
self.assertIn(modified_content, self.courseware.xblock_component_html_content(0))
def test_cancel_does_not_create_draft(self):
"""
Scenario: Editing a component and then canceling does not create a draft version (TNL-399)
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
And edit the content of an HTML component and then press cancel
Then the content does not change
And the title in the Publish information box is "Published and Live"
And when I reload the page
Then the title in the Publish information box is "Published and Live"
"""
unit = self.go_to_unit_page()
component = unit.xblocks[1]
component.edit()
HtmlXBlockEditorView(self.browser, component.locator).set_content_and_cancel("modified content")
self.assertEqual(component.student_content, "Body of HTML Unit.")
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self.browser.refresh()
unit.wait_for_page()
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
def test_delete_child_in_published_unit(self):
"""
Scenario: A published unit can be published again after deleting a child
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
And delete the only component
Then the title in the Publish information box is "Draft (Unpublished changes)"
And when I click the Publish button
Then the title in the Publish information box is "Published and Live"
And when I click the View Live button
Then I see an empty unit in LMS
"""
unit = self.go_to_unit_page()
unit.delete(0)
unit.verify_publish_title(self.DRAFT_STATUS)
unit.publish_action.click()
unit.wait_for_ajax()
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self._view_published_version(unit)
self.assertEqual(0, self.courseware.num_xblock_components)
def test_published_not_live(self):
"""
Scenario: The publish title displays correctly for units that are not live
Given I have a published unit with no unpublished changes that releases in the future
When I go to the unit page in Studio
Then the title in the Publish information box is "Published (not yet released)"
And when I add a component to the unit
Then the title in the Publish information box is "Draft (Unpublished changes)"
And when I click the Publish button
Then the title in the Publish information box is "Published (not yet released)"
"""
unit = self.go_to_unit_page('Unreleased Section', 'Unreleased Subsection', 'Unreleased Unit')
unit.verify_publish_title(self.PUBLISHED_STATUS)
add_discussion(unit)
unit.verify_publish_title(self.DRAFT_STATUS)
unit.publish_action.click()
unit.wait_for_ajax()
unit.verify_publish_title(self.PUBLISHED_STATUS)
def _view_published_version(self, unit):
"""
Goes to the published version, then waits for the browser to load the page.
"""
unit.view_published_version()
self.assertEqual(len(self.browser.window_handles), 2)
self.courseware.wait_for_page()
def _verify_and_return_staff_page(self):
"""
Verifies that the browser is on the staff page and returns a StaffCoursewarePage.
"""
page = StaffCoursewarePage(self.browser, self.course_id)
page.wait_for_page()
return page
def _verify_student_view_locked(self):
"""
Verifies no component is visible when viewing as a student.
"""
self._verify_and_return_staff_page().set_staff_view_mode('Learner')
self.assertEqual(0, self.courseware.num_xblock_components)
def _verify_student_view_visible(self, expected_components):
"""
Verifies expected components are visible when viewing as a student.
"""
self._verify_and_return_staff_page().set_staff_view_mode('Learner')
self._verify_components_visible(expected_components)
def _verify_components_visible(self, expected_components):
"""
Verifies the expected components are visible (and there are no extras).
"""
self.assertEqual(len(expected_components), self.courseware.num_xblock_components)
for index, component in enumerate(expected_components):
self.assertEqual(component, self.courseware.xblock_component_type(index))
def _verify_release_date_info(self, unit, expected_title, expected_date):
"""
Verifies how the release date is displayed in the publishing sidebar.
"""
self.assertEqual(expected_title, unit.release_title)
self.assertEqual(expected_date, unit.release_date)
def _verify_last_published_and_saved(self, unit, expected_published_prefix, expected_saved_prefix):
"""
Verifies that last published and last saved messages respectively contain the given strings.
"""
self.assertIn(expected_published_prefix, unit.last_published_text)
self.assertIn(expected_saved_prefix, unit.last_saved_text)
def _verify_explicit_lock_overrides_implicit_lock_information(self, unit_page):
"""
Verifies that a unit with inherited staff lock does not display inherited information when explicitly locked.
"""
self.assertTrue(unit_page.shows_inherited_staff_lock())
unit_page.toggle_staff_lock(inherits_staff_lock=True)
self.assertFalse(unit_page.shows_inherited_staff_lock())
unit_page.toggle_staff_lock(inherits_staff_lock=True)
self.assertTrue(unit_page.shows_inherited_staff_lock())
# TODO: need to work with Jay/Christine to get testing of "Preview" working.
# def test_preview(self):
# unit = self.go_to_unit_page()
# add_discussion(unit)
# unit.preview()
# self.assertEqual(2, self.courseware.num_xblock_components)
# self.assertEqual('html', self.courseware.xblock_component_type(0))
# self.assertEqual('discussion', self.courseware.xblock_component_type(1))
@attr(shard=3)
class DisplayNameTest(ContainerBase):
"""
Test consistent use of display_name_with_default
"""
def populate_course_fixture(self, course_fixture):
"""
Sets up a course structure with nested verticals.
"""
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('vertical', None)
)
)
)
)
def test_display_name_default(self):
"""
Scenario: Given that an XBlock with a dynamic display name has been added to the course,
When I view the unit page and note the display name of the block,
Then I see the dynamically generated display name,
And when I then go to the container page for that same block,
Then I see the same generated display name.
"""
# Unfortunately no blocks in the core platform implement display_name_with_default
# in an interesting way for this test, so we are just testing for consistency and not
# the actual value.
unit = self.go_to_unit_page()
test_block = unit.xblocks[1]
title_on_unit_page = test_block.name
container = test_block.go_to_container()
self.assertEqual(container.name, title_on_unit_page)
@attr(shard=3)
class ProblemCategoryTabsTest(ContainerBase):
"""
Test to verify tabs in problem category.
"""
def setUp(self, is_staff=True):
super(ProblemCategoryTabsTest, self).setUp(is_staff=is_staff)
def populate_course_fixture(self, course_fixture):
"""
Sets up course structure.
"""
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
)
)
def test_correct_tabs_present(self):
"""
Scenario: Verify that correct tabs are present in problem category.
Given I am a staff user
When I go to unit page
Then I only see `Common Problem Types` and `Advanced` tabs in `problem` category
"""
self.go_to_unit_page()
page = ContainerPage(self.browser, None)
self.assertEqual(page.get_category_tab_names('problem'), ['Common Problem Types', 'Advanced'])
def test_common_problem_types_tab(self):
"""
Scenario: Verify that correct components are present in Common Problem Types tab.
Given I am a staff user
When I go to unit page
Then I see correct components under `Common Problem Types` tab in `problem` category
"""
self.go_to_unit_page()
page = ContainerPage(self.browser, None)
expected_components = [
"Blank Common Problem",
"Checkboxes",
"Dropdown",
"Multiple Choice",
"Numerical Input",
"Text Input",
"Checkboxes with Hints and Feedback",
"Dropdown with Hints and Feedback",
"Multiple Choice with Hints and Feedback",
"Numerical Input with Hints and Feedback",
"Text Input with Hints and Feedback",
]
self.assertEqual(page.get_category_tab_components('problem', 1), expected_components)
@attr(shard=1)
@ddt.ddt
class MoveComponentTest(ContainerBase):
"""
Tests of moving an XBlock to another XBlock.
"""
PUBLISHED_LIVE_STATUS = "Publishing Status\nPublished and Live"
DRAFT_STATUS = "Publishing Status\nDraft (Unpublished changes)"
def setUp(self, is_staff=True):
super(MoveComponentTest, self).setUp(is_staff=is_staff)
self.container = ContainerPage(self.browser, None)
self.move_modal_view = MoveModalView(self.browser)
self.navigation_options = {
'section': 0,
'subsection': 0,
'unit': 1,
}
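# With the course fixture defined below, these options point the move modal at
# section 0 ('Test Section'), subsection 0 ('Test Subsection') and unit index 1,
# i.e. the second unit, 'Test Unit 2'.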
self.source_component_display_name = 'HTML 11'
self.source_xblock_category = 'component'
self.message_move = 'Success! "{display_name}" has been moved.'
self.message_undo = 'Move cancelled. "{display_name}" has been moved back to its original location.'
def populate_course_fixture(self, course_fixture):
"""
Sets up a course structure.
"""
# pylint: disable=attribute-defined-outside-init
self.unit_page1 = XBlockFixtureDesc('vertical', 'Test Unit 1').add_children(
XBlockFixtureDesc('html', 'HTML 11'),
XBlockFixtureDesc('html', 'HTML 12')
)
self.unit_page2 = XBlockFixtureDesc('vertical', 'Test Unit 2').add_children(
XBlockFixtureDesc('html', 'HTML 21'),
XBlockFixtureDesc('html', 'HTML 22')
)
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
self.unit_page1,
self.unit_page2
)
)
)
def verify_move_operations(self, unit_page, source_component, operation, component_display_names_after_operation,
should_verify_publish_title=True):
"""
Verify a move or undo-move operation and the resulting container contents.
Arguments:
unit_page (Object): Unit container page.
source_component (Object): Source XBlock object to be moved.
operation (str): Either 'move' or 'undo_move'.
component_display_names_after_operation (list): Expected display names of the components in the source/destination container after the operation.
should_verify_publish_title (bool): Whether to verify the publish title. Defaults to True.
"""
source_component.open_move_modal()
self.move_modal_view.navigate_to_category(self.source_xblock_category, self.navigation_options)
self.assertEqual(self.move_modal_view.is_move_button_enabled, True)
# Verify unit is in published state before move operation
if should_verify_publish_title:
self.container.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self.move_modal_view.click_move_button()
self.container.verify_confirmation_message(
self.message_move.format(display_name=self.source_component_display_name)
)
self.assertEqual(len(unit_page.displayed_children), 1)
# Verify unit in draft state now
if should_verify_publish_title:
self.container.verify_publish_title(self.DRAFT_STATUS)
if operation == 'move':
self.container.click_take_me_there_link()
elif operation == 'undo_move':
self.container.click_undo_move_link()
self.container.verify_confirmation_message(
self.message_undo.format(display_name=self.source_component_display_name)
)
unit_page = ContainerPage(self.browser, None)
components = unit_page.displayed_children
self.assertEqual(
[component.name for component in components],
component_display_names_after_operation
)
def verify_state_change(self, unit_page, operation):
"""
Verify that after state change, confirmation message is hidden.
Arguments:
unit_page (Object) Unit container page.
operation (String) Publish or discard changes operation.
"""
# Verify unit in draft state now
self.container.verify_publish_title(self.DRAFT_STATUS)
# Now click publish/discard button
if operation == 'publish':
unit_page.publish_action.click()
else:
unit_page.discard_changes()
# Now verify success message is hidden
self.container.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self.container.verify_confirmation_message(
message=self.message_move.format(display_name=self.source_component_display_name),
verify_hidden=True
)
def test_move_component_successfully(self):
"""
Test if we can move a component successfully.
Given I am a staff user
And I go to unit page in first section
And I open the move modal
And I navigate to the second unit of the subsection
And I see move button is enabled
When I click on the move button
Then I see move operation success message
And When I click on take me there link
Then I see moved component there.
"""
unit_page = self.go_to_unit_page(unit_name='Test Unit 1')
components = unit_page.displayed_children
self.assertEqual(len(components), 2)
self.verify_move_operations(
unit_page=unit_page,
source_component=components[0],
operation='move',
component_display_names_after_operation=['HTML 21', 'HTML 22', 'HTML 11']
)
def test_undo_move_component_successfully(self):
"""
Test if we can undo a component move successfully.
Given I am a staff user
And I go to unit page in first section
And I open the move modal
When I click on the move button
Then I see the move operation success message
And when I click on the undo move link
Then I see that the undo move operation is successful
"""
unit_page = self.go_to_unit_page(unit_name='Test Unit 1')
components = unit_page.displayed_children
self.assertEqual(len(components), 2)
self.verify_move_operations(
unit_page=unit_page,
source_component=components[0],
operation='undo_move',
component_display_names_after_operation=['HTML 11', 'HTML 12']
)
@ddt.data('publish', 'discard')
def test_publish_discard_changes_after_move(self, operation):
"""
Test if success banner is hidden when we discard changes or publish the unit after a move operation.
Given I am a staff user
And I go to unit page in first section
And I open the move modal
And I navigate to the second unit of the subsection
And I see move button is enabled
When I click on the move button
Then I see move operation success message
And When I click on publish or discard changes button
Then I see move operation success message is hidden.
"""
unit_page = self.go_to_unit_page(unit_name='Test Unit 1')
components = unit_page.displayed_children
self.assertEqual(len(components), 2)
components[0].open_move_modal()
self.move_modal_view.navigate_to_category(self.source_xblock_category, self.navigation_options)
self.assertEqual(self.move_modal_view.is_move_button_enabled, True)
# Verify unit is in published state before move operation
self.container.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self.move_modal_view.click_move_button()
self.container.verify_confirmation_message(
self.message_move.format(display_name=self.source_component_display_name)
)
self.assertEqual(len(unit_page.displayed_children), 1)
self.verify_state_change(unit_page, operation)
def test_content_experiment(self):
"""
Test if we can move a component of a content experiment successfully.
Given that I am a staff user
And I go to content experiment page
And I open the move dialogue modal
When I navigate to the second unit of the subsection
Then I see move button is enabled
And when I click on the move button
Then I see move operation success message
And when I click on take me there link
Then I see moved component there
And when I undo move a component
Then I see the undo move operation success message
"""
# Add content experiment support to course.
self.course_fixture.add_advanced_settings({
u'advanced_modules': {'value': ['split_test']},
})
# Create group configurations
# pylint: disable=protected-access
self.course_fixture._update_xblock(self.course_fixture._course_location, {
'metadata': {
u'user_partitions': [
create_user_partition_json(
0,
'Test Group Configuration',
'Description of the group configuration.',
[Group('0', 'Group A'), Group('1', 'Group B')]
),
],
},
})
# Add split test to unit_page1 and assign newly created group configuration to it
split_test = XBlockFixtureDesc('split_test', 'Test Content Experiment', metadata={'user_partition_id': 0})
self.course_fixture.create_xblock(self.unit_page1.locator, split_test)
# Visit content experiment container page.
unit_page = ContainerPage(self.browser, split_test.locator)
unit_page.visit()
group_a_locator = unit_page.displayed_children[0].locator
# Add some components to Group A.
self.course_fixture.create_xblock(
group_a_locator, XBlockFixtureDesc('html', 'HTML 311')
)
self.course_fixture.create_xblock(
group_a_locator, XBlockFixtureDesc('html', 'HTML 312')
)
# Go to the group page to move its component.
group_container_page = ContainerPage(self.browser, group_a_locator)
group_container_page.visit()
# Verify content experiment block has correct groups and components.
components = group_container_page.displayed_children
self.assertEqual(len(components), 2)
self.source_component_display_name = 'HTML 311'
# Verify undo move operation for content experiment.
self.verify_move_operations(
unit_page=group_container_page,
source_component=components[0],
operation='undo_move',
component_display_names_after_operation=['HTML 311', 'HTML 312'],
should_verify_publish_title=False
)
# Verify move operation for content experiment.
self.verify_move_operations(
unit_page=group_container_page,
source_component=components[0],
operation='move',
component_display_names_after_operation=['HTML 21', 'HTML 22', 'HTML 311'],
should_verify_publish_title=False
)
# Ideally this test should be decorated with @attr('a11y') so that it runs in the a11y jenkins job,
# but for some reason it always fails there while it passes locally on devstack as well as in the
# bokchoy jenkins job. For this reason, the test is marked to run under the bokchoy jenkins job.
def test_a11y(self):
"""
Verify move modal a11y.
"""
unit_page = self.go_to_unit_page(unit_name='Test Unit 1')
unit_page.a11y_audit.config.set_scope(
include=[".modal-window.move-modal"]
)
unit_page.a11y_audit.config.set_rules({
'ignore': [
'color-contrast', # TODO: AC-716
'link-href', # TODO: AC-716
]
})
unit_page.displayed_children[0].open_move_modal()
for category in ['section', 'subsection', 'component']:
self.move_modal_view.navigate_to_category(category, self.navigation_options)
unit_page.a11y_audit.check_for_accessibility_errors()
|
lduarte1991/edx-platform
|
common/test/acceptance/tests/studio/test_studio_container.py
|
Python
|
agpl-3.0
| 70,544
|
[
"VisIt"
] |
8ba58f97a4d9abd1c9e540640e559906bf5f41c9953ffb5a0d147f1abfa29e3e
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from shutil import rmtree
import tempfile
import unittest
from pyspark.ml import Transformer
from pyspark.ml.classification import DecisionTreeClassifier, FMClassifier, \
FMClassificationModel, LogisticRegression, MultilayerPerceptronClassifier, \
MultilayerPerceptronClassificationModel, OneVsRest, OneVsRestModel
from pyspark.ml.clustering import KMeans
from pyspark.ml.feature import Binarizer, HashingTF, PCA
from pyspark.ml.linalg import Vectors
from pyspark.ml.param import Params
from pyspark.ml.pipeline import Pipeline, PipelineModel
from pyspark.ml.regression import DecisionTreeRegressor, GeneralizedLinearRegression, \
GeneralizedLinearRegressionModel, \
LinearRegression
from pyspark.ml.util import DefaultParamsReadable, DefaultParamsWriter
from pyspark.ml.wrapper import JavaParams
from pyspark.testing.mlutils import MockUnaryTransformer, SparkSessionTestCase
class TestDefaultSolver(SparkSessionTestCase):
def test_multilayer_load(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense([0.0, 0.0])),
(1.0, Vectors.dense([0.0, 1.0])),
(1.0, Vectors.dense([1.0, 0.0])),
(0.0, Vectors.dense([1.0, 1.0]))],
["label", "features"])
mlp = MultilayerPerceptronClassifier(layers=[2, 2, 2], seed=123)
model = mlp.fit(df)
self.assertEqual(model.getSolver(), "l-bfgs")
transformed1 = model.transform(df)
path = tempfile.mkdtemp()
model_path = path + "/mlp"
model.save(model_path)
model2 = MultilayerPerceptronClassificationModel.load(model_path)
self.assertEqual(model2.getSolver(), "l-bfgs")
transformed2 = model2.transform(df)
self.assertEqual(transformed1.take(4), transformed2.take(4))
def test_fm_load(self):
df = self.spark.createDataFrame([(1.0, Vectors.dense(1.0)),
(0.0, Vectors.sparse(1, [], []))],
["label", "features"])
fm = FMClassifier(factorSize=2, maxIter=50, stepSize=2.0)
model = fm.fit(df)
self.assertEqual(model.getSolver(), "adamW")
transformed1 = model.transform(df)
path = tempfile.mkdtemp()
model_path = path + "/fm"
model.save(model_path)
model2 = FMClassificationModel.load(model_path)
self.assertEqual(model2.getSolver(), "adamW")
transformed2 = model2.transform(df)
self.assertEqual(transformed1.take(2), transformed2.take(2))
def test_glr_load(self):
df = self.spark.createDataFrame([(1.0, Vectors.dense(0.0, 0.0)),
(1.0, Vectors.dense(1.0, 2.0)),
(2.0, Vectors.dense(0.0, 0.0)),
(2.0, Vectors.dense(1.0, 1.0))],
["label", "features"])
glr = GeneralizedLinearRegression(family="gaussian", link="identity", linkPredictionCol="p")
model = glr.fit(df)
self.assertEqual(model.getSolver(), "irls")
transformed1 = model.transform(df)
path = tempfile.mkdtemp()
model_path = path + "/glr"
model.save(model_path)
model2 = GeneralizedLinearRegressionModel.load(model_path)
self.assertEqual(model2.getSolver(), "irls")
transformed2 = model2.transform(df)
self.assertEqual(transformed1.take(4), transformed2.take(4))
class PersistenceTest(SparkSessionTestCase):
def test_linear_regression(self):
lr = LinearRegression(maxIter=1)
path = tempfile.mkdtemp()
lr_path = path + "/lr"
lr.save(lr_path)
lr2 = LinearRegression.load(lr_path)
self.assertEqual(lr.uid, lr2.uid)
self.assertEqual(type(lr.uid), type(lr2.uid))
self.assertEqual(lr2.uid, lr2.maxIter.parent,
"Loaded LinearRegression instance uid (%s) did not match Param's uid (%s)"
% (lr2.uid, lr2.maxIter.parent))
self.assertEqual(lr._defaultParamMap[lr.maxIter], lr2._defaultParamMap[lr2.maxIter],
"Loaded LinearRegression instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_linear_regression_pmml_basic(self):
# Most of the validation is done on the Scala side; here we just check
# that we output text rather than parquet (i.e. that the format flag
# was respected).
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
lr = LinearRegression(maxIter=1)
model = lr.fit(df)
path = tempfile.mkdtemp()
lr_path = path + "/lr-pmml"
model.write().format("pmml").save(lr_path)
pmml_text_list = self.sc.textFile(lr_path).collect()
pmml_text = "\n".join(pmml_text_list)
self.assertIn("Apache Spark", pmml_text)
self.assertIn("PMML", pmml_text)
def test_logistic_regression(self):
lr = LogisticRegression(maxIter=1)
path = tempfile.mkdtemp()
lr_path = path + "/logreg"
lr.save(lr_path)
lr2 = LogisticRegression.load(lr_path)
self.assertEqual(lr2.uid, lr2.maxIter.parent,
"Loaded LogisticRegression instance uid (%s) "
"did not match Param's uid (%s)"
% (lr2.uid, lr2.maxIter.parent))
self.assertEqual(lr._defaultParamMap[lr.maxIter], lr2._defaultParamMap[lr2.maxIter],
"Loaded LogisticRegression instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_kmeans(self):
kmeans = KMeans(k=2, seed=1)
path = tempfile.mkdtemp()
km_path = path + "/km"
kmeans.save(km_path)
kmeans2 = KMeans.load(km_path)
self.assertEqual(kmeans.uid, kmeans2.uid)
self.assertEqual(type(kmeans.uid), type(kmeans2.uid))
self.assertEqual(kmeans2.uid, kmeans2.k.parent,
"Loaded KMeans instance uid (%s) did not match Param's uid (%s)"
% (kmeans2.uid, kmeans2.k.parent))
self.assertEqual(kmeans._defaultParamMap[kmeans.k], kmeans2._defaultParamMap[kmeans2.k],
"Loaded KMeans instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_kmean_pmml_basic(self):
# Most of the validation is done on the Scala side; here we just check
# that we output text rather than parquet (i.e. that the format flag
# was respected).
data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
(Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
df = self.spark.createDataFrame(data, ["features"])
kmeans = KMeans(k=2, seed=1)
model = kmeans.fit(df)
path = tempfile.mkdtemp()
km_path = path + "/km-pmml"
model.write().format("pmml").save(km_path)
pmml_text_list = self.sc.textFile(km_path).collect()
pmml_text = "\n".join(pmml_text_list)
self.assertIn("Apache Spark", pmml_text)
self.assertIn("PMML", pmml_text)
def _compare_params(self, m1, m2, param):
"""
Compare 2 ML Params instances for the given param, and assert both have the same param value
and parent. The param must be a parameter of m1.
"""
# Prevent a KeyError when a param appears in neither the paramMap nor the defaultParamMap.
if m1.isDefined(param):
paramValue1 = m1.getOrDefault(param)
paramValue2 = m2.getOrDefault(m2.getParam(param.name))
if isinstance(paramValue1, Params):
self._compare_pipelines(paramValue1, paramValue2)
else:
self.assertEqual(paramValue1, paramValue2) # for general types param
# Assert parents are equal
self.assertEqual(param.parent, m2.getParam(param.name).parent)
else:
# If the param is not defined on m1, it should not be defined on m2 either. See SPARK-14931.
self.assertFalse(m2.isDefined(m2.getParam(param.name)))
def _compare_pipelines(self, m1, m2):
"""
Compare 2 ML types, asserting that they are equivalent.
This currently supports:
- basic types
- Pipeline, PipelineModel
- OneVsRest, OneVsRestModel
This checks:
- uid
- type
- Param values and parents
"""
self.assertEqual(m1.uid, m2.uid)
self.assertEqual(type(m1), type(m2))
if isinstance(m1, JavaParams) or isinstance(m1, Transformer):
self.assertEqual(len(m1.params), len(m2.params))
for p in m1.params:
self._compare_params(m1, m2, p)
elif isinstance(m1, Pipeline):
self.assertEqual(len(m1.getStages()), len(m2.getStages()))
for s1, s2 in zip(m1.getStages(), m2.getStages()):
self._compare_pipelines(s1, s2)
elif isinstance(m1, PipelineModel):
self.assertEqual(len(m1.stages), len(m2.stages))
for s1, s2 in zip(m1.stages, m2.stages):
self._compare_pipelines(s1, s2)
elif isinstance(m1, OneVsRest) or isinstance(m1, OneVsRestModel):
for p in m1.params:
self._compare_params(m1, m2, p)
if isinstance(m1, OneVsRestModel):
self.assertEqual(len(m1.models), len(m2.models))
for x, y in zip(m1.models, m2.models):
self._compare_pipelines(x, y)
else:
raise RuntimeError("_compare_pipelines does not yet support type: %s" % type(m1))
def test_pipeline_persistence(self):
"""
Pipeline[HashingTF, PCA]
"""
temp_path = tempfile.mkdtemp()
try:
df = self.spark.createDataFrame([(["a", "b", "c"],), (["c", "d", "e"],)], ["words"])
tf = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
pca = PCA(k=2, inputCol="features", outputCol="pca_features")
pl = Pipeline(stages=[tf, pca])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
def test_nested_pipeline_persistence(self):
"""
Pipeline[HashingTF, Pipeline[PCA]]
"""
temp_path = tempfile.mkdtemp()
try:
df = self.spark.createDataFrame([(["a", "b", "c"],), (["c", "d", "e"],)], ["words"])
tf = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
pca = PCA(k=2, inputCol="features", outputCol="pca_features")
p0 = Pipeline(stages=[pca])
pl = Pipeline(stages=[tf, p0])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
def test_python_transformer_pipeline_persistence(self):
"""
Pipeline[MockUnaryTransformer, Binarizer]
"""
temp_path = tempfile.mkdtemp()
try:
df = self.spark.range(0, 10).toDF('input')
tf = MockUnaryTransformer(shiftVal=2)\
.setInputCol("input").setOutputCol("shiftedInput")
tf2 = Binarizer(threshold=6, inputCol="shiftedInput", outputCol="binarized")
pl = Pipeline(stages=[tf, tf2])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
def test_onevsrest(self):
temp_path = tempfile.mkdtemp()
df = self.spark.createDataFrame([(0.0, 0.5, Vectors.dense(1.0, 0.8)),
(1.0, 0.5, Vectors.sparse(2, [], [])),
(2.0, 1.0, Vectors.dense(0.5, 0.5))] * 10,
["label", "wt", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr)
def reload_and_compare(ovr, suffix):
model = ovr.fit(df)
ovrPath = temp_path + "/{}".format(suffix)
ovr.save(ovrPath)
loadedOvr = OneVsRest.load(ovrPath)
self._compare_pipelines(ovr, loadedOvr)
modelPath = temp_path + "/{}Model".format(suffix)
model.save(modelPath)
loadedModel = OneVsRestModel.load(modelPath)
self._compare_pipelines(model, loadedModel)
reload_and_compare(OneVsRest(classifier=lr), "ovr")
reload_and_compare(OneVsRest(classifier=lr).setWeightCol("wt"), "ovrw")
def test_decisiontree_classifier(self):
dt = DecisionTreeClassifier(maxDepth=1)
path = tempfile.mkdtemp()
dtc_path = path + "/dtc"
dt.save(dtc_path)
dt2 = DecisionTreeClassifier.load(dtc_path)
self.assertEqual(dt2.uid, dt2.maxDepth.parent,
"Loaded DecisionTreeClassifier instance uid (%s) "
"did not match Param's uid (%s)"
% (dt2.uid, dt2.maxDepth.parent))
self.assertEqual(dt._defaultParamMap[dt.maxDepth], dt2._defaultParamMap[dt2.maxDepth],
"Loaded DecisionTreeClassifier instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_decisiontree_regressor(self):
dt = DecisionTreeRegressor(maxDepth=1)
path = tempfile.mkdtemp()
dtr_path = path + "/dtr"
dt.save(dtr_path)
dt2 = DecisionTreeRegressor.load(dtr_path)
self.assertEqual(dt2.uid, dt2.maxDepth.parent,
"Loaded DecisionTreeRegressor instance uid (%s) "
"did not match Param's uid (%s)"
% (dt2.uid, dt2.maxDepth.parent))
self.assertEqual(dt._defaultParamMap[dt.maxDepth], dt2._defaultParamMap[dt2.maxDepth],
"Loaded DecisionTreeRegressor instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_default_read_write(self):
temp_path = tempfile.mkdtemp()
lr = LogisticRegression()
lr.setMaxIter(50)
lr.setThreshold(.75)
writer = DefaultParamsWriter(lr)
savePath = temp_path + "/lr"
writer.save(savePath)
reader = DefaultParamsReadable.read()
lr2 = reader.load(savePath)
self.assertEqual(lr.uid, lr2.uid)
self.assertEqual(lr.extractParamMap(), lr2.extractParamMap())
# test overwrite
lr.setThreshold(.8)
writer.overwrite().save(savePath)
reader = DefaultParamsReadable.read()
lr3 = reader.load(savePath)
self.assertEqual(lr.uid, lr3.uid)
self.assertEqual(lr.extractParamMap(), lr3.extractParamMap())
def test_default_read_write_default_params(self):
lr = LogisticRegression()
self.assertFalse(lr.isSet(lr.getParam("threshold")))
lr.setMaxIter(50)
lr.setThreshold(.75)
# `threshold` is set by user, default param `predictionCol` is not set by user.
self.assertTrue(lr.isSet(lr.getParam("threshold")))
self.assertFalse(lr.isSet(lr.getParam("predictionCol")))
self.assertTrue(lr.hasDefault(lr.getParam("predictionCol")))
writer = DefaultParamsWriter(lr)
metadata = json.loads(writer._get_metadata_to_save(lr, self.sc))
self.assertTrue("defaultParamMap" in metadata)
reader = DefaultParamsReadable.read()
metadataStr = json.dumps(metadata, separators=[',', ':'])
loadedMetadata = reader._parseMetaData(metadataStr, )
reader.getAndSetParams(lr, loadedMetadata)
self.assertTrue(lr.isSet(lr.getParam("threshold")))
self.assertFalse(lr.isSet(lr.getParam("predictionCol")))
self.assertTrue(lr.hasDefault(lr.getParam("predictionCol")))
# manually create metadata without `defaultParamMap` section.
del metadata['defaultParamMap']
metadataStr = json.dumps(metadata, separators=[',', ':'])
loadedMetadata = reader._parseMetaData(metadataStr, )
with self.assertRaisesRegexp(AssertionError, "`defaultParamMap` section not found"):
reader.getAndSetParams(lr, loadedMetadata)
# Prior to 2.4.0, metadata doesn't have `defaultParamMap`.
metadata['sparkVersion'] = '2.3.0'
metadataStr = json.dumps(metadata, separators=[',', ':'])
loadedMetadata = reader._parseMetaData(metadataStr, )
reader.getAndSetParams(lr, loadedMetadata)
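# For reference, the metadata JSON written by DefaultParamsWriter (and parsed above via
# _parseMetaData) is roughly shaped like the sketch below; fields other than 'sparkVersion'
# and 'defaultParamMap' (both exercised by this test) are illustrative values only:
#     {"class": "pyspark.ml.classification.LogisticRegression",
#      "timestamp": 1234567890, "sparkVersion": "3.0.0", "uid": "LogisticRegression_...",
#      "paramMap": {"maxIter": 50, "threshold": 0.75},
#      "defaultParamMap": {"predictionCol": "prediction", ...}}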
if __name__ == "__main__":
from pyspark.ml.tests.test_persistence import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
dbtsai/spark
|
python/pyspark/ml/tests/test_persistence.py
|
Python
|
apache-2.0
| 19,696
|
[
"Gaussian"
] |
c36c42cde4495f3a1aad610238ce2cac564e0522e48783fa1a9d7529fb4b3c7a
|
#!/usr/bin/env python
########################################################################
#
# (C) 2015, Chris Houseknecht <chouse@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import json
import urllib
from urllib2 import quote as urlquote, HTTPError
from urlparse import urlparse
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.module_utils.urls import open_url
from ansible.utils.color import stringc
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyLogin(object):
''' Class to handle authenticating a user with the Galaxy API prior to performing CUD operations '''
GITHUB_AUTH = 'https://api.github.com/authorizations'
def __init__(self, galaxy, github_token=None):
self.galaxy = galaxy
self.github_username = None
self.github_password = None
if github_token is None:
self.get_credentials()
def get_credentials(self):
display.display(u'\n\n' + "We need your " + stringc("Github login",'bright cyan') +
" to identify you.", screen_only=True)
display.display("This information will " + stringc("not be sent to Galaxy",'bright cyan') +
", only to " + stringc("api.github.com.","yellow"), screen_only=True)
display.display("The password will not be displayed." + u'\n\n', screen_only=True)
display.display("Use " + stringc("--github-token",'yellow') +
" if you do not want to enter your password." + u'\n\n', screen_only=True)
try:
self.github_username = raw_input("Github Username: ")
except:
pass
try:
self.github_password = getpass.getpass("Password for %s: " % self.github_username)
except:
pass
if not self.github_username or not self.github_password:
raise AnsibleError("Invalid Github credentials. Username and password are required.")
def remove_github_token(self):
'''
If for some reason an ansible-galaxy token was left from a prior login, remove it. We cannot
retrieve the token after creation, so we are forced to create a new one.
'''
try:
tokens = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username,
url_password=self.github_password, force_basic_auth=True,))
except HTTPError as e:
res = json.load(e)
raise AnsibleError(res['message'])
for token in tokens:
if token['note'] == 'ansible-galaxy login':
display.vvvvv('removing token: %s' % token['token_last_eight'])
try:
open_url('https://api.github.com/authorizations/%d' % token['id'], url_username=self.github_username,
url_password=self.github_password, method='DELETE', force_basic_auth=True,)
except HTTPError as e:
res = json.load(e)
raise AnsibleError(res['message'])
def create_github_token(self):
'''
Create a personal authorization token with a note of 'ansible-galaxy login'
'''
self.remove_github_token()
args = json.dumps({"scopes":["public_repo"], "note":"ansible-galaxy login"})
try:
data = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username,
url_password=self.github_password, force_basic_auth=True, data=args))
except HTTPError as e:
res = json.load(e)
raise AnsibleError(res['message'])
return data['token']
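# Illustrative usage sketch (the caller is assumed, not shown in this module): the
# `ansible-galaxy login` command would typically do something like
#     login = GalaxyLogin(galaxy)           # prompts for GitHub username/password
#     token = login.create_github_token()   # drops any stale token, then creates a new one
# and then pass `token` on to the Galaxy server for authentication.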
|
goozbach/ansible
|
lib/ansible/galaxy/login.py
|
Python
|
gpl-3.0
| 4,490
|
[
"Galaxy"
] |
7a1a50461ee5cbc42804ce61b5b293de45679bc791c06b7a9f895ea1a12cf70b
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('crystal_snake')
mobileTemplate.setLevel(65)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setMeatType("Carnivore Meat")
mobileTemplate.setMeatAmount(5)
mobileTemplate.setHideType("Scaley Hide")
mobileTemplate.setHideAmount(2)
mobileTemplate.setSocialGroup("crystal snake")
mobileTemplate.setAssistRange(12)
mobileTemplate.setStalker(False)
mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_crystal_snake.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_bite_4')
attacks.add('bm_hamstring_4')
attacks.add('bm_puncture_2')
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('crystal_snake', mobileTemplate)
return
|
agry/NGECore2
|
scripts/mobiles/yavin4/crystal_snake.py
|
Python
|
lgpl-3.0
| 1,636
|
[
"CRYSTAL"
] |
962a8a78fd22b7dbe6ee38bbb25513e148814046db10b0445c506ab42d7d055c
|
from math import sqrt
from ase import Atoms, Atom
from ase.constraints import FixAtoms
from ase.optimize import FIRE, QuasiNewton, BFGS
from ase.neb import SingleCalculatorNEB
from ase.calculators.emt import EMT
Optimizer = BFGS
# Distance between Cu atoms on a (111) surface:
a = 3.6
d = a / sqrt(2)
fcc111 = Atoms(symbols='Cu',
cell=[(d, 0, 0),
(d / 2, d * sqrt(3) / 2, 0),
(d / 2, d * sqrt(3) / 6, -a / sqrt(3))],
pbc=True)
initial = fcc111 * (2, 2, 4)
initial.set_cell([2 * d, d * sqrt(3), 1])
initial.set_pbc((1, 1, 0))
initial.set_calculator(EMT())
Z = initial.get_positions()[:, 2]
indices = [i for i, z in enumerate(Z) if z < Z.mean()]
constraint = FixAtoms(indices=indices)
initial.set_constraint(constraint)
dyn = Optimizer(initial)
dyn.run(fmax=0.05)
Z = initial.get_positions()[:, 2]
print Z[0] - Z[1]
print Z[1] - Z[2]
print Z[2] - Z[3]
b = 1.2
h = 1.5
initial += Atom('C', (d / 2, -b / 2, h))
initial += Atom('O', (d / 2, +b / 2, h))
s = initial.copy()
dyn = Optimizer(initial)
dyn.run(fmax=0.05)
#view(initial)
# create final
final = initial.copy()
final.set_calculator(EMT())
final.set_constraint(constraint)
final[-2].position = final[-1].position
final[-1].x = d
final[-1].y = d / sqrt(3)
dyn = Optimizer(final)
dyn.run(fmax=0.1)
#view(final)
# create 2 intermediate step neb
neb = SingleCalculatorNEB([initial, final])
neb.refine(2)
neb.set_calculators(EMT())
assert neb.n() == 4
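# As the assert above indicates, refine(2) interpolated two extra images between the two
# endpoints, giving four images in total; the refine(2, 1, 3) call further below adds two
# images in each interval between images 1 and 3, which is why the band ends up with the
# eight images asserted at the end of the script.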
dyn = Optimizer(neb, maxstep=0.04, trajectory='mep_2coarse.traj')
dyn.run(fmax=0.1)
#dyn.run(fmax=39.1)
# read from the trajectory
neb = SingleCalculatorNEB('mep_2coarse.traj@-4:')
# refine in the important region
neb.refine(2, 1, 3)
neb.set_calculators(EMT())
dyn = Optimizer(neb, maxstep=0.04, trajectory='mep_2fine.traj')
dyn.run(fmax=0.1)
assert len(neb.images) == 8
|
grhawk/ASE
|
tools/ase/test/COCu111_2.py
|
Python
|
gpl-2.0
| 1,850
|
[
"ASE"
] |
0d75b2ca547cc0124e180bc78c990273c66579b0fd6ac4c465d08496d814ad07
|
raise(Exception("This script is obsolete!"))
import shutil
import os
import tarfile
import tempfile
import mdtraj
import itertools
import pandas as pd
min_num_gen = 50
source_dir = "/cbio/jclab/projects/fah/fah-data/PROJ8900/"
destination_dir = "./Trajectories/"
provenance_file = file("./provenance.csv", 'a')
traj = mdtraj.load("./system.pdb")
top, bonds = traj.top.to_dataframe()
top = top[top.chainID == 0]
atom_indices = top.index.values
k = 0
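# Folding@home project data is laid out as PROJ8900/RUN<r>/CLONE<c>/frame-<g>.xtc; the
# nested loops below walk runs, clones and generations until a directory or frame file
# is missing, then stitch each clone's frames into a single trajectory.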
for run in itertools.count():
if not os.path.exists(source_dir + "/RUN%d/" % run):
break
for clone in itertools.count():
if not os.path.exists(source_dir + "/RUN%d/CLONE%d/" % (run, clone)):
break
for gen in itertools.count():
xtc_filename = source_dir + "/RUN%d/CLONE%d/frame-%.3d.xtc" % (run, clone, gen)
if not os.path.exists(xtc_filename):
break
num_gen = gen
traj = mdtraj.load([source_dir + "/RUN%d/CLONE%d/frame-%.3d.xtc" % (run, clone, gen) for gen in range(num_gen)], top="./system.pdb", atom_indices=atom_indices)
if num_gen >= min_num_gen:
out_filename = destination_dir + "/trj%d.lh5" % k
traj.save_legacy_hdf(out_filename)
provenance = pd.DataFrame([[run, clone, num_gen]], index=[k], columns=["run", "clone", "num_gen"])
provenance.to_csv(provenance_file, header=False)
provenance_file.flush()
k += 1
|
hainm/MSMs
|
attic/src/code/fahprocessing/old/FAH_to_MSMB2.py
|
Python
|
gpl-2.0
| 1,499
|
[
"MDTraj"
] |
2f5bb6c2b4aad8d6fc9d25b1092f60f19d07cef6afa94bcf71e0aeaa4e312962
|
from mpi4py import MPI
from lammps import lammps
lmp = lammps()
lmp.file("in.colloid")
me=MPI.COMM_WORLD.Get_rank()
nprocs=MPI.COMM_WORLD.Get_size()
print "Proc %d out of %d procs has " % (me,nprocs),lmp
MPI.Finalize()
|
zimmermant/dlvo_lammps
|
examples/mpi_test/test.py
|
Python
|
gpl-2.0
| 219
|
[
"LAMMPS"
] |
48801c4988ed169f476e3f8d7c82ff74f8d39fae0c039d2ac28f1e54a29730f3
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import sys
import numpy as np
from scipy.special import expi
import matplotlib.pyplot as plt
a01 = np.genfromtxt('../../../../../../test/tests/heat_advection/gold/heat_advection_1d_csv_T_0010.csv', delimiter = ',', names = True, dtype = float)
b01 = np.genfromtxt('../../../../../../test/tests/heat_advection/gold/heat_advection_1d_fully_saturated_T_0010.csv', delimiter = ',', names = True, dtype = float)
c01 = np.genfromtxt('../../../../../../test/tests/heat_advection/gold/heat_advection_1d_KT_T_0010.csv', delimiter = ',', names = True, dtype = float)
a06 = np.genfromtxt('../../../../../../test/tests/heat_advection/gold/heat_advection_1d_csv_T_0060.csv', delimiter = ',', names = True, dtype = float)
b06 = np.genfromtxt('../../../../../../test/tests/heat_advection/gold/heat_advection_1d_fully_saturated_T_0060.csv', delimiter = ',', names = True, dtype = float)
c06 = np.genfromtxt('../../../../../../test/tests/heat_advection/gold/heat_advection_1d_KT_T_0060.csv', delimiter = ',', names = True, dtype = float)
plt.figure()
fig, axes = plt.subplots(1, 2, figsize = (15, 4))
# Temperature at 0.1 s for the three stabilization schemes
axes[0].plot(b01['x'], b01['temp'], label = 'No upwinding')
axes[0].plot(a01['x'], a01['temp'], label = 'Full upwinding')
axes[0].plot(c01['x'], c01['temp'], label = 'KT stabilization')
axes[0].set_xlabel('x (m)')
axes[0].set_ylabel('Temperature (K)')
axes[0].grid()
axes[0].legend()
axes[0].set_title("Temperature at 0.1s")
# Temperature at 0.6 s for the three stabilization schemes
axes[1].plot(b06['x'], b06['temp'], label = 'No upwinding')
axes[1].plot(a06['x'], a06['temp'], label = 'Full upwinding')
axes[1].plot(c06['x'], c06['temp'], label = 'KT stabilization')
axes[1].set_xlabel('x (m)')
axes[1].set_ylabel('Temperature (K)')
axes[1].legend()
axes[1].grid()
axes[1].set_title("Temperature at 0.6s")
plt.tight_layout()
plt.savefig("heat_advection.png")
sys.exit(0)
|
harterj/moose
|
modules/porous_flow/doc/content/modules/porous_flow/tests/heat_advection/heat_advection.py
|
Python
|
lgpl-2.1
| 2,227
|
[
"MOOSE"
] |
65e6e3fdd5921a0c570abd349c8391c4d23ecc0e2acd223660e2b16deffebc9c
|
#!/usr/bin/env python
import vtk
import sys
from optparse import OptionParser
from vtk.util.colors import *
#
# parse command line options
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename", default="fort.14",
help="file to read")
parser.add_option("-i", "--interact", dest="interact", default=False,
action="store_true", help="to enable interaction with data")
parser.add_option("-v", "--variable", dest="variable",
default="BathymetricDepth", help="variable to visualize")
parser.add_option("-a", "--annotation", dest="annotation", default="null",
help="text to place in frame")
(options, args) = parser.parse_args()
# set up offscreen rendering
#graphics_factory = vtk.vtkGraphicsFactory()
#graphics_factory.SetOffScreenOnlyMode(1)
#graphics_factory.SetUseMesaClasses(1)
#imaging_factory = vtk.vtkImagingFactory()
#imaging_factory.SetUseMesaClasses(1)
#cdp = vtk.vtkCompositeDataPipeline()
#vtkAlgorithm::SetDefaultExecutivePrototype(cdp)
# Create an unstructured grid reader
meshReader = vtk.vtkXMLUnstructuredGridReader()
meshReader.SetFileName(options.filename)
print 'Reading ',options.variable,' from ',options.filename,'.'
meshReader.SetPointArrayStatus(options.variable,1)
if options.variable != 'BathymetricDepth':
meshReader.SetPointArrayStatus('BathymetricDepth',0)
meshReader.Update()
# create actor for unstructured grid outline
meshGeometryFilter = vtk.vtkGeometryFilter()
meshGeometryFilter.SetInput(meshReader.GetOutput())
outlineMesh = vtk.vtkFeatureEdges()
outlineMesh.SetInputConnection(meshGeometryFilter.GetOutputPort())
outlineMesh.BoundaryEdgesOn()
outlineMeshMapper = vtk.vtkPolyDataMapper()
outlineMeshMapper.SetInputConnection(outlineMesh.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMeshMapper)
outlineActor.GetProperty().SetColor(1,1,1)
# create a color scale (color lookup table)
refLut = vtk.vtkLookupTable()
lut = vtk.vtkLookupTable()
refLut.SetNumberOfColors(256)
lut.SetNumberOfColors(256)
refLut.SetHueRange(0.0, 0.667)
refLut.Build()
lut.Build()
for j in range(256):
lut.SetTableValue(j, refLut.GetTableValue(255-j))
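# Copying the reference table in reverse order flips the hue ramp so that low scalar
# values map to blue and high values to red (refLut itself runs red to blue).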
#
planeMapper = vtk.vtkDataSetMapper()
planeMapper.SetInputConnection(meshReader.GetOutputPort())
planeMapper.SetScalarRange(meshReader.GetOutput().GetScalarRange())
planeMapper.SetLookupTable(lut)
gridActor = vtk.vtkActor()
gridActor.SetMapper(planeMapper)
#gridActor.GetProperty().SetRepresentationToWireframe()
# streamline parameters for whole gulf and atlantic
seedCenterX = -33000000.0
seedCenterY = 3000000.0
seedCenterZ = 0.0
seedRadius = 3000000.0
seedNum = 10000
streamMaxPropagationTime = 160000.0
streamTubeRadius = 5000.0
# streamline parameters for just eastern nc area
#seedCenterX = -32700000.0
#seedCenterY = 3600000.0
#seedCenterZ = 0.0
#seedRadius = 500000.0
#seedNum = 10000
#streamMaxPropagationTime = 160000.0
#streamTubeRadius = 500.0
# create streamlines
if options.variable == 'WindVelocity':
print 'Generating streamlines.'
seedsSphere = vtk.vtkPointSource()
seedsSphere.SetCenter(seedCenterX, seedCenterY, seedCenterZ)
seedsSphere.SetRadius(seedRadius)
seedsSphere.SetNumberOfPoints(seedNum)
seedTransform = vtk.vtkTransform()
seedTransform.Scale(1.0,1.0,0.0)
#seedTransform.RotateZ(1.0*float(frame)) # 1 degree
seedFilter = vtk.vtkTransformPolyDataFilter()
seedFilter.SetTransform(seedTransform)
seedFilter.SetInputConnection(seedsSphere.GetOutputPort())
integ = vtk.vtkRungeKutta4()
streamer = vtk.vtkStreamTracer()
streamer.SetInputConnection(meshReader.GetOutputPort())
#streamer.SetStartPosition(0.18474886E+01, 0.12918899E+00, 0.00000000E+00)
streamer.SetSource(seedFilter.GetOutput())
streamer.SetMaximumPropagation(streamMaxPropagationTime)
#streamer.SetMaximumPropagationUnitToTimeUnit()
streamer.SetInitialIntegrationStep(1.0)
#streamer.SetInitialIntegrationStepUnitToCellLengthUnit()
#streamer.SetIntegrationDirectionToBoth()
streamer.SetIntegrator(integ)
#
streamTube = vtk.vtkTubeFilter()
streamTube.SetInputConnection(streamer.GetOutputPort())
#streamTube.SetInputArrayToProcess(1,0,0,vtkDataObject::FIELD_ASSOCIATION_POINTS, vectors)
streamTube.SetRadius(streamTubeRadius)
streamTube.SetNumberOfSides(12)
#streamTube.SetVaryRadiusToVaryRadiusByVector()
mapStreamTube = vtk.vtkPolyDataMapper()
mapStreamTube.SetInputConnection(streamTube.GetOutputPort())
mapStreamTube.SetScalarRange(meshReader.GetOutput().GetPointData().GetScalars().GetRange())
mapStreamTube.SetLookupTable(lut)
streamTubeActor = vtk.vtkActor()
streamTubeActor.SetMapper(mapStreamTube)
streamTubeActor.GetProperty().SetColor(0.0,0.0,0.0)
##streamTubeActor.GetProperty().BackfaceCullingOn()
# Create annotation
if ( options.annotation != "null" ):
ann = vtk.vtkTextActor()
ann.SetTextScaleModeToViewport()
ann.SetDisplayPosition(50,600)
ann.SetInput(options.annotation)
# specify an initial size
ann.GetPosition2Coordinate().SetCoordinateSystemToNormalizedViewport()
ann.GetPosition2Coordinate().SetValue(0.6,0.1)
# properties of text annotation
annprop = ann.GetTextProperty()
annprop.SetFontSize(36)
annprop.SetFontFamilyToArial()
#annprop.SetJustificationToCentered()
#annprop.BoldOn()
#annprop.ItalicOn()
#annprop.ShadowOn()
annprop.SetColor(0,0,0)
# Create the usual rendering stuff.
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(700, 700)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.SetBackground(1.0, 1.0, 1.0)
ren.AddActor(outlineActor)
if options.variable != 'WindVelocity':
ren.AddActor(gridActor)
else:
ren.AddActor(streamTubeActor)
if ( options.annotation != "null" ):
ren.AddActor(ann)
ren.ResetCamera()
cam = ren.GetActiveCamera()
cam.Zoom(1.3)
if options.interact == False:
renWin.OffScreenRenderingOn()
renWin.Render()
# Interact with the data.
if options.interact == True:
iren.Initialize()
iren.Start()
else:
# write a png
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(renWin)
w2if.Update()
writer = vtk.vtkPNGWriter()
writer.SetFileName(options.filename + ".png")
writer.SetInput(w2if.GetOutput())
writer.Write()
#integ = vtk.vtkRungeKutta4()
#streamer = vtk.vtkStreamTracer()
##streamer.SetInputConnection(meshReader.GetOutputPort())
##streamer.SetStartPosition(-0.77236544E+02, 0.28891293E+02, 0.00000000E+00)
#streamer.SetSource(seedFilter.GetOutput())
#streamer.SetMaximumPropagation(100)
##streamer.SetMaximumPropagationUnitToTimeUnit()
#streamer.SetInitialIntegrationStep(1.0)
#streamer.SetInitialIntegrationStepUnitToCellLengthUnit()
#streamer.SetIntegrationDirectionToBoth()
#streamer.SetIntegrator(integ)
#streamTube = vtk.vtkTubeFilter()
#streamTube.SetInputConnection(streamer.GetOutputPort())
#streamTube.SetInputArrayToProcess(1,0,0,vtkDataObject::FIELD_ASSOCIATION_POINTS, vectors)
#streamTube.SetRadius(10.0)
#streamTube.SetNumberOfSides(12)
#streamTube.SetVaryRadiusToVaryRadiusByVector()
#streamWarp = vtk.vtkWarpScalar()
#streamWarp.SetInputConnection(streamTube.GetOutputPort())
#streamWarp.SetNormal(0.0,0.0,1.0)
#streamWarp.UseNormalOn()
#streamWarp.SetScaleFactor(10.0)
#mapStreamTube = vtk.vtkPolyDataMapper()
#mapStreamTube.SetInputConnection(streamWarp.GetOutputPort())
#mapStreamTube.SetScalarRange(meshReader.GetOutput().GetPointData().GetScalars().GetRange())
#mapStreamTube.SetLookupTable(lut)
#streamTubeActor = vtk.vtkActor()
#streamTubeActor.SetMapper(mapStreamTube)
##streamTubeActor.GetProperty().BackfaceCullingOn()
# This creates a blue to red lut.
#lut.SetHueRange(0.667, 0.0)
# Create the rendering window, renderer, and interactive renderer
#ren = vtk.vtkRenderer()
#cam = ren.GetActiveCamera()
##cam.Elevation(-30)
#cam.Pitch(30)
#renWin = vtk.vtkRenderWindow()
#renWin.AddRenderer(ren)
#iren = vtk.vtkRenderWindowInteractor()
#iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#ren.AddActor(gridActor)
#ren.AddActor(outlineActor)
#ren.AddActor(streamTubeActor)
#ren.AddActor(warpActor)
#ren.SetBackground(0, 0, 0)
#renWin.SetSize(650, 650)
#if ( interact == 0 ):
# renWin.OffScreenRenderingOn()
#cam.Dolly(10)
#ren.ResetCamera()
#cam.Zoom(1.5)
#renWin.Render()
# write a png
#w2if = vtk.vtkWindowToImageFilter()
#w2if.SetInput(renWin)
#w2if.Update()
#writer = vtk.vtkPNGWriter()
#filename = 'spatial_data_%03d.png' % int(frame)
#writer.SetFileName(filename)
#writer.SetInput(w2if.GetOutput())
#writer.Write()
#px = vtk.vtkPOVExporter()
#px.SetFileName("stuff.pov")
#px.SetRenderWindow(renWin)
#px.Write()
# Interact with the data.
#if (interact == 1):
# iren.Initialize()
# iren.Start()
|
jasonfleming/asgs
|
output/adcirc_xml.py
|
Python
|
gpl-3.0
| 8,844
|
[
"VTK"
] |
28f9ade6430f2ee9c0ee8aa64036d8d19aa911335d1de96006ced449afce82b3
|
../../../../../../../share/pyshared/orca/scripts/toolkits/WebKitGtk/script_settings.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/toolkits/WebKitGtk/script_settings.py
|
Python
|
gpl-3.0
| 86
|
[
"ORCA"
] |
32c06b2ac6d21fde571962521454b9db0a303b712b2469607fa872f164c5369b
|
# -*- coding: utf-8 -*-
u"""
==================================
Input and output (:mod:`scipy.io`)
==================================
.. currentmodule:: scipy.io
SciPy has many modules, classes, and functions available to read data
from and write data to a variety of file formats.
.. seealso:: `NumPy IO routines <https://www.numpy.org/devdocs/reference/routines.io.html>`__
MATLAB® files
=============
.. autosummary::
:toctree: generated/
loadmat - Read a MATLAB style mat file (version 4 through 7.1)
savemat - Write a MATLAB style mat file (version 4 through 7.1)
whosmat - List contents of a MATLAB style mat file (version 4 through 7.1)
IDL® files
==========
.. autosummary::
:toctree: generated/
readsav - Read an IDL 'save' file
Matrix Market files
===================
.. autosummary::
:toctree: generated/
mminfo - Query matrix info from Matrix Market formatted file
mmread - Read matrix from Matrix Market formatted file
mmwrite - Write matrix to Matrix Market formatted file
Unformatted Fortran files
===============================
.. autosummary::
:toctree: generated/
FortranFile - A file object for unformatted sequential Fortran files
FortranEOFError - Exception indicating the end of a well-formed file
FortranFormattingError - Exception indicating an inappropriate end
Netcdf
======
.. autosummary::
:toctree: generated/
netcdf_file - A file object for NetCDF data
netcdf_variable - A data object for the netcdf module
Harwell-Boeing files
====================
.. autosummary::
:toctree: generated/
hb_read -- read H-B file
hb_write -- write H-B file
Wav sound files (:mod:`scipy.io.wavfile`)
=========================================
.. module:: scipy.io.wavfile
.. autosummary::
:toctree: generated/
read
write
WavFileWarning
Arff files (:mod:`scipy.io.arff`)
=================================
.. module:: scipy.io.arff
.. autosummary::
:toctree: generated/
loadarff
MetaData
ArffError
ParseArffError
"""
# matfile read and write
from ._matlab import loadmat, savemat, whosmat, byteordercodes
# netCDF file support
from ._netcdf import netcdf_file, netcdf_variable
# Fortran file support
from ._fortran import FortranFile, FortranEOFError, FortranFormattingError
from ._mmio import mminfo, mmread, mmwrite
from ._idl import readsav
from ._harwell_boeing import hb_read, hb_write
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
|
grlee77/scipy
|
scipy/io/__init__.py
|
Python
|
bsd-3-clause
| 2,575
|
[
"NetCDF"
] |
3ac687fb9d228568347fa25eb0067d2a458a74b11ca7d752529144b48b48a158
|
#!/usr/bin/env python
"""
Vision demo configuration routines.
"""
import csv
import os
import collections
import cPickle as pickle
import numpy as np
import gzip
import networkx as nx
from neurokernel.LPU.LPU import LPU
from neurokernel.pattern import Pattern
import neurokernel.plsel as plsel
class hex_array(object):
"""
0 1 2 3 4
----------------------> cols (X=cols*sqrt(3))
0| 0 2 4
| 1 3
1| 5 7 9
| 6 8
2| 10 12 14
| 11 13
|
V
rows (first col: 0,2,4,6)
(Y=2*row if col is even else Y=2*row+1 )
"""
def __init__(self, nrows, ncols):
self.nrows = nrows
self.ncols = ncols
self.num_elements = nrows * ncols
self.X = np.tile(np.arange(self.ncols, dtype = np.double).reshape((1, self.ncols))*np.sqrt(3),
(self.nrows, 1))
if (self.ncols % 2 == 0):
self.Y = np.tile(np.arange(2*self.nrows, dtype = np.double).reshape((self.nrows, 2)),
(1, self.ncols//2))
else:
self.Y = np.tile(np.arange(2*self.nrows, dtype = np.double).reshape((self.nrows, 2)),
(1, self.ncols//2+1))
self.Y = self.Y[:,0:-1]
self.col = np.tile(np.arange(self.ncols, dtype = np.int32).reshape((1, self.ncols)),
(self.nrows, 1))
self.row = np.tile(np.arange(self.nrows, dtype = np.int32).reshape((self.nrows, 1)),
(1, self.ncols))
#self.Y = self.Y + np.tile(np.asarray([0, 1]),
# (self.nrows, self.ncols/2))
self.col = self.col.reshape(-1)
self.row = self.row.reshape(-1)
self.num = np.arange(self.num_elements, dtype = np.int32).reshape(nrows, ncols)
def find_neighbor(self, row, col):
"""
neighbors are defined relatively as
1
2 6
0
3 5
4
"""
if col < 0 or col >= self.ncols:
raise ValueError("column number " + str(col) + " exceeds array limit")
if row < 0 or row >= self.nrows:
raise ValueError("row number " + str(row) + " exceeds array limit")
# adding neighbor 0 (self)
neighbor = [self.num[row, col]]
# adding neighbor 1
neighbor.append(self.num[row-1, col] if row != 0 else None)
# adding neighbor 2, 3
if col == 0:
neighbor.extend([None, None])
elif col % 2 == 0:
if row == 0:
neighbor.extend([None, self.num[row, col-1]])
else:
neighbor.extend(list(self.num[row-1:row+1, col-1]))
else:
if row == self.nrows-1:
neighbor.extend([self.num[row, col-1], None])
else:
neighbor.extend(list(self.num[row:row+2, col-1]))
# adding neighbor 4
neighbor.append(self.num[row+1, col] if row != self.nrows-1 else None)
# adding neighbor 5, 6
if col == self.ncols-1:
neighbor.extend([None, None])
elif col % 2 == 0:
if row == 0:
neighbor.extend([self.num[row, col+1], None])
else:
neighbor.extend(
list(self.num[row:row-2 if row-2 >= 0 else None:-1, col+1]))
else:
if row == self.nrows-1:
neighbor.extend([None, self.num[row, col+1]])
else:
neighbor.extend(
list(self.num[row+1:row-1 if row-1 >= 0 else None:-1, col+1]))
return neighbor
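# Illustrative example (not part of the original module): build a small hex_array
# and query the neighbors of one element. Index 0 of the returned list is the
# element itself, and entries are None where a neighbor falls outside the array.
#
#   hexes = hex_array(3, 5)
#   print(hexes.find_neighbor(1, 2))   # seven entries, ordered as in the docstring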
class vision_LPU(object):
def __init__(self, nrows, ncols, neuron_csv,
columnar_synapse_csv, other_synapse_csv,
LPU_name):
self.nrows = nrows
self.ncols = ncols
self.num_cartridges = nrows * ncols
self.neuron_csv = neuron_csv
self.columnar_synapse_csv = columnar_synapse_csv
self.other_synapse_csv = other_synapse_csv
self.hexarray = hex_array(nrows, ncols)
self._connected = False
self.LPU_name = LPU_name
self.composition_rules = []
# def read_neurons(self):
# read in csv file and turn it into a numpy structured array
neuron_list = []
dtypes = [np.dtype('S10'), np.dtype('S32'),
np.dtype(np.int32), np.dtype(np.int32),
np.dtype(np.int32), np.dtype(np.int32),
np.dtype(np.int32), np.dtype(np.int32),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double)]
with open(self.neuron_csv, 'rU') as csvfile:
reader = csv.reader(csvfile)
self.neuron_field_name = reader.next()
n_entry = len(self.neuron_field_name)
for row in reader:
tmp = [dtypes[i].type(row[i]) for i in range(n_entry)]
neuron_list.append(tuple(tmp))
self.num_neuron_types = len(neuron_list)
self.neuron_dict = np.array(
neuron_list,
dtype = [(a, b) for a, b in zip(self.neuron_field_name, dtypes)])
# def read_synapses(self):
# read in csv file and turn it into a numpy structured array
if self.columnar_synapse_csv is not None:
synapse_list = []
dtypes = [np.dtype('S10'), np.dtype('S10'),
np.dtype('S32'),
np.dtype(np.int32), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.int32)]
with open(self.columnar_synapse_csv, 'rU') as csvfile:
reader = csv.reader(csvfile)
synapse_field_name = reader.next()
n_entry = len(synapse_field_name)
for row in reader:
tmp = [dtypes[i].type(row[i]) for i in range(n_entry)]
synapse_list.append(tuple(tmp))
self.num_synapse_types = len(synapse_list)
self.synapse_dict = np.array(
synapse_list,
dtype = [(a, b) for a, b in zip(synapse_field_name, dtypes)])
else:
# TODO: will fail later if synapse_dict is empty
self.num_synapse_types = 0
self.synapse_dict = []
if self.other_synapse_csv is not None:
synapse_list = []
dtypes = [np.dtype('S10'), np.dtype('S10'),
np.dtype('S32'),
np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.double), np.dtype(np.double),
np.dtype(np.int32)]
with open(self.other_synapse_csv, 'rU') as csvfile:
reader = csv.reader(csvfile)
synapse_field_name = reader.next()
n_entry = len(synapse_field_name)
for row in reader:
tmp = [dtypes[i].type(row[i]) for i in range(n_entry)]
synapse_list.append(tuple(tmp))
self.num_other_synapse_types = len(synapse_list)
self.other_synapse_dict = np.array(
synapse_list,
dtype = [(a, b) for a, b in zip(synapse_field_name, dtypes)])
else:
self.num_other_synapse_types = 0
self.other_synapse_dict = []
def create_cartridges(self):
# create a number of cartridges
self.cartridge_neuron_dict = self.neuron_dict[self.neuron_dict['columnar'] == 1]
self.cartridge_synapse_dict = self.synapse_dict[self.synapse_dict['cart'] == 0]
self.cartridges = []
for _ in range(self.num_cartridges):
self.cartridges.append(
Cartridge(self.cartridge_neuron_dict,
self.cartridge_synapse_dict))
def connect_cartridges(self):
# connect cartridge from their neighbors
if not hasattr(self, 'cartridges'):
raise AttributeError("Need to create cartridges before connecting them")
count = 0
for cartridge in self.cartridges:
row = np.asscalar(self.hexarray.row[count])
col = np.asscalar(self.hexarray.col[count])
cartridge.assign_pos(count, row, col,
np.asscalar(self.hexarray.X[row,col]),
np.asscalar(self.hexarray.Y[row,col]))
neighbor_num = self.hexarray.find_neighbor(row, col)
cartridge.set_neighbors(
[self.cartridges[num] if num is not None else None
for num in neighbor_num])
count += 1
self._connected = True
def create_non_columnar_neurons(self):
self.non_columnar_neurons = collections.OrderedDict()
self.non_columnar_neuron_list = self.neuron_dict[self.neuron_dict['columnar'] != 1]
dtnames = self.non_columnar_neuron_list.dtype.names
for neuron_dict in self.non_columnar_neuron_list:
name = neuron_dict['name']
self.non_columnar_neurons.update({name: []})
for _ in range(neuron_dict['columnar']):
self.non_columnar_neurons[name].append(
Neuron(dict(zip(dtnames, [np.asscalar(p) for p in neuron_dict]))))
def remove_cartridge(self, num):
pass
def remove_neuron_type(self, name):
pass
def __repr__(self):
if hasattr(self, 'cartridges'):
return 'LPU with '+str(len(self.cartridges))+' cartridges'
else:
return 'LPU unconfigured'
def export_to_gexf(self, filename):
g = nx.MultiDiGraph()
num = 0
for neuron_type in self.neuron_dict:
if not neuron_type['dummy']:
if neuron_type['columnar'] == 1:
name = neuron_type['name']
for cartridge in self.cartridges:
neuron = cartridge.neurons[name]
neuron.add_num(num)
neuron.process_before_export()
g.add_node(num, neuron.params)
num += 1
for name in self.non_columnar_neurons.iterkeys():
for neuron in self.non_columnar_neurons[name]:
neuron.add_num(num)
neuron.process_before_export()
g.add_node(num, neuron.params)
num += 1
for cartridge in self.cartridges:
for synapse in cartridge.synapses:
synapse.process_before_export()
g.add_edge(synapse.pre_neuron.num, synapse.post_neuron.num,
attr_dict = synapse.params)
for cr in self.composition_rules:
for synapse in cr['synapses']:
synapse.process_before_export()
g.add_edge(synapse.pre_neuron.num, synapse.post_neuron.num,
attr_dict = synapse.params)
if isinstance(filename, str):
name, ext = os.path.splitext(filename)
if name == '':
raise ValueError("Please specify a valid filename")
if ext == '.gz':
with gzip.open(filename, 'w') as f:
nx.write_gexf(g, f, prettyprint=True)
else:
if ext != '.gexf':
name = filename + '.gexf'
else:
name = filename
nx.write_gexf(g, name, prettyprint=True)
else:
raise ValueError("Specify the filename in string")
def add_selectors(self):
for neuron_type in self.neuron_dict:
if not neuron_type['dummy']:
if neuron_type['columnar'] == 1:
if neuron_type['public'] == 1:
name = neuron_type['name']
for cartridge in self.cartridges:
neuron = cartridge.neurons[name]
neuron.add_selector(
'/'+self.LPU_name+'/cart{0}'.format(cartridge.num)
+'/'+name)
for name in self.non_columnar_neurons.iterkeys():
count = 0
for neuron in self.non_columnar_neurons[name]:
if neuron.is_public():
neuron.add_selector(
'/'+self.LPU_name+'/'+name+'[{0}]'.format(count))
count += 1
class Lamina(vision_LPU):
def __init__(self, nrows, ncols, neuron_csv,
columnar_synapse_csv, other_synapse_csv):
super(Lamina, self).__init__(nrows, ncols, neuron_csv,
columnar_synapse_csv, other_synapse_csv,
'lamina')
def connect_composition_II(self):
# create synapses defined in composition rule II.
if not self._connected:
raise AttributeError("Need to connect cartridges before setting interconnects")
self.rule2synapses = self.synapse_dict[self.synapse_dict['cart'] != 0]
synapse_list = []
dtnames = self.rule2synapses.dtype.names
for cartridge in self.cartridges:
for synapse_array in self.rule2synapses:
neighbor_num = synapse_array['cart']
if cartridge.neighbors[neighbor_num] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons[synapse_array['prename']],
cartridge.neighbors[neighbor_num].neurons[synapse_array['postname']])
synapse_list.append(synapse)
self.composition_rules.append({'synapses': synapse_list})
def connect_composition_I(self):
am_list = self.non_columnar_neurons['Am']
synapse_list = []
n_amacrine = len(am_list) # self.non_columnar_neuron_number['Am']
am_xpos = np.random.random(n_amacrine)*self.hexarray.X[-1,-1]
am_ypos = np.random.random(n_amacrine)*self.hexarray.Y[-1,-1]
count = 0
for neuron in am_list:
neuron.assign_pos(np.asscalar(am_xpos[count]),
np.asscalar(am_ypos[count]))
count += 1
bound = 4.0
alpha_profiles = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6']
        fill = np.zeros((n_amacrine, self.num_cartridges), np.int32)
count = 0
for cartridge in self.cartridges:
xpos = cartridge.xpos
ypos = cartridge.ypos
#calculate distance and find amacrine cells within
#distance defined by bound
dist = np.sqrt((xpos-am_xpos)**2 + (ypos-am_ypos)**2)
suitable_am = np.nonzero(dist <= bound)[0]
# if less than 4 neurons in the bound, get
# the 4 closest amacrine cells
if suitable_am.size < 4:
suitable_am = np.argsort(dist)[0:4]
for name in alpha_profiles:
assigned = False
for am_num in np.random.permutation(suitable_am):
if fill[am_num, count] < 3:
fill[am_num, count] += 1
#a1-a6 do not have synapses outside a cartridge
synapses = cartridge.replace_dummy(name, am_list[am_num])
synapse_list.extend(synapses)
assigned = True
break
if not assigned:
print name + ' in cartridge ' + str(cartridge.num) + ' not assigned'
count += 1
self.fill = fill
self.composition_rules.append( {'synapses': synapse_list} )
def __repr__(self):
if hasattr(self, 'cartridges'):
return 'Lamina LPU with '+str(len(self.cartridges))+' cartridges'
else:
return 'Lamina LPU unconfigured'
class Medulla(vision_LPU):
def __init__(self, nrows, ncols, neuron_csv,
columnar_synapse_csv, other_synapse_csv):
super(Medulla, self).__init__(nrows, ncols, neuron_csv,
columnar_synapse_csv, other_synapse_csv,
'medulla')
def connect_composition_I(self):
if not self._connected:
raise AttributeError("Need to connect cartridges before setting interconnects")
self.rule1synapses = self.synapse_dict[self.synapse_dict['cart'] != 0]
synapse_list = []
dtnames = self.rule1synapses.dtype.names
for cartridge in self.cartridges:
for synapse_array in self.rule1synapses:
neighbor_num = synapse_array['cart']
if cartridge.neighbors[neighbor_num] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons[synapse_array['prename']],
cartridge.neighbors[neighbor_num].neurons[synapse_array['postname']])
synapse_list.append(synapse)
self.composition_rules.append({'synapses': synapse_list})
def connect_composition_II(self):
synapse_list = []
rule2synapses = self.other_synapse_dict[self.other_synapse_dict['postname'] == 'Dm3']
dtnames = rule2synapses.dtype.names
synapse_array = rule2synapses[0]
for cartridge in self.cartridges:
if cartridge.neighbors[2] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons['L2'],
cartridge.neighbors[2].neurons['Dm3'])
synapse_list.append(synapse)
if cartridge.neighbors[3] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons['L2'],
cartridge.neighbors[3].neurons['Dm3'])
synapse_list.append(synapse)
if cartridge.neighbors[5] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons['L2'],
cartridge.neighbors[5].neurons['Dm3'])
synapse_list.append(synapse)
if cartridge.neighbors[6] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons['L2'],
cartridge.neighbors[6].neurons['Dm3'])
synapse_list.append(synapse)
if cartridge.neighbors[2] is not None:
if cartridge.neighbors[2].neighbors[3] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons['L2'],
cartridge.neighbors[2].neighbors[3].neurons['Dm3'])
synapse_list.append(synapse)
elif cartridge.neighbors[3] is not None:
if cartridge.neighbors[3].neighbors[2] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons['L2'],
cartridge.neighbors[3].neighbors[2].neurons['Dm3'])
synapse_list.append(synapse)
if cartridge.neighbors[5] is not None:
if cartridge.neighbors[5].neighbors[6] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons['L2'],
cartridge.neighbors[5].neighbors[6].neurons['Dm3'])
synapse_list.append(synapse)
elif cartridge.neighbors[6] is not None:
if cartridge.neighbors[6].neighbors[5] is not None:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_array])))
synapse.link(
cartridge.neurons['L2'],
cartridge.neighbors[6].neighbors[5].neurons['Dm3'])
synapse_list.append(synapse)
self.composition_rules.append({'synapses': synapse_list})
def connect_composition_III(self):
synapse_list = []
Mt3v_list = self.non_columnar_neurons['Mt3v']
Mt3h_list = self.non_columnar_neurons['Mt3h']
for neuron in Mt3v_list:
neuron.assign_pos(0., 0.)
for neuron in Mt3h_list:
neuron.assign_pos(0., 0.)
rule3synapsesv = self.other_synapse_dict[self.other_synapse_dict['postname'] == 'Mt3v']
rule3synapsesh = self.other_synapse_dict[self.other_synapse_dict['postname'] == 'Mt3h']
dtnames = rule3synapsesv.dtype.names
for cartridge in self.cartridges:
synapse = Synapse(dict(zip(dtnames, [np.asscalar(p) for p in rule3synapsesv[0]])))
mtn = int(np.floor(cartridge.neurons['L2'].ypos / ((self.hexarray.Y[-1][-1]+1)/4)))
synapse.link(cartridge.neurons['L2'], Mt3v_list[mtn])
synapse_list.append(synapse)
synapse = Synapse(dict(zip(dtnames, [np.asscalar(p) for p in rule3synapsesh[0]])))
mtn = int(np.floor(cartridge.neurons['L2'].xpos / ((self.hexarray.X[-1][-1]+1)/4)))
synapse.link(cartridge.neurons['L2'], Mt3h_list[mtn])
synapse_list.append(synapse)
self.composition_rules.append({'synapses': synapse_list})
def __repr__(self):
if hasattr(self, 'cartridges'):
return 'Medulla LPU with '+str(len(self.cartridges))+' cartridges'
else:
return 'Medulla LPU unconfigured'
class Cartridge(object):
def __init__(self, neuron, connection):
self.connected = False
self.neuron_list = neuron.copy()
self.synapse_list = connection.copy()
self.neurons = collections.OrderedDict()
dtnames = self.neuron_list.dtype.names
for neuron_dict in self.neuron_list:
self.neurons.update(
{neuron_dict['name']:
Neuron(dict(zip(dtnames, [np.asscalar(p) for p in neuron_dict])))})
dtnames = self.synapse_list.dtype.names
self.synapses = []
for synapse_dict in self.synapse_list:
synapse = Synapse(
dict(zip(dtnames, [np.asscalar(p) for p in synapse_dict])))
synapse.link(self.neurons[synapse.prename],
self.neurons[synapse.postname])
self.synapses.append(synapse)
def set_neighbors(self, neighbor_cartridges):
self.neighbors = []
for i in range(7):
self.neighbors.append(neighbor_cartridges[i])
def assign_pos(self, num, row, col, xpos, ypos):
self.num = num
self.row = row
self.col = col
self.xpos = xpos
self.ypos = ypos
for neurons in self.neurons:
self.neurons[neurons].assign_pos(xpos, ypos)
self.connected = True
def position(self):
return (self.xpos, self.ypos)
def __repr__(self):
if self.connected:
return 'Cartridge at ' + str(self.position())
else:
return 'Isolated cartridge at '+ hex(id(self))
def get_num(self):
return self.num
def get_xpos(self):
return self.xpos
def get_ypos(self):
return self.ypos
def replace_dummy(self, name, neuron):
removed_synapse_list = []
neuron_to_be_replaced = self.neurons[name]
if not neuron_to_be_replaced.dummy:
raise ValueError("Neuron to be replaced is not dummy element")
for synapse in neuron_to_be_replaced.outgoing_synapses:
flag = self.remove_synapse(synapse)
synapse.replace_pre(neuron)
if flag:
removed_synapse_list.append(synapse)
for synapse in neuron_to_be_replaced.incoming_synapses:
flag = self.remove_synapse(synapse)
synapse.replace_post(neuron)
if flag:
removed_synapse_list.append(synapse)
self.neurons[name].set_parent(neuron)
#self.remove_neuron(name)
return removed_synapse_list
def remove_neuron(self, name):
self.neurons.pop(name)
def remove_synapse(self, synapse):
# the try/except here is to deal with Am to Am connection that
# may have been removed previously by another Am in the same cartridge
try:
self.synapses.remove(synapse)
return True
        except ValueError:
return False
class Neuron(object):
def __init__(self, param_dict):
self.params = param_dict.copy()
spiking = False
self.params.update({'spiking': spiking})
if 'dummy' in self.params.keys():
self.dummy = self.params.pop('dummy')
else:
self.dummy = False
self.outgoing_synapses = []
self.incoming_synapses = []
@property
def name(self):
return self.params['name']
def add_outgoing_synapse(self, synapse):
self.outgoing_synapses.append(synapse)
def add_incoming_synapse(self, synapse):
self.incoming_synapses.append(synapse)
def remove_outgoing_synapse(self, synapse):
self.outgoing_synapses.remove(synapse)
def remove_incoming_synapse(self, synapse):
self.incoming_synapses.remove(synapse)
def __repr__(self):
return 'neuron '+self.params['name']+': '+str(self.params)
def __str__(self):
return 'neuron '+str(self.params['name'])
def assign_pos(self, xpos, ypos):
self.params.update({'xpos': xpos, 'ypos': ypos})
def position(self):
return (self.params['xpos'], self.params['ypos'])
@property
def xpos(self):
return self.params['xpos']
@property
def ypos(self):
return self.params['ypos']
def add_num(self, num):
self.num = num
def process_before_export(self):
self.params.update({'n_dendrites': len(self.incoming_synapses),
'n_outputs': len(self.outgoing_synapses)})
if 'columnar' in self.params.keys():
del self.params['columnar']
self.params['input'] = bool(self.params['input'])
self.params['output'] = bool(self.params['output'])
self.params['public'] = bool(self.params['public'])
self.params['extern'] = bool(self.params['extern'])
self.params['model'] = str(self.params['model'])
def is_public(self):
return self.params['public']
def add_selector(self, selector):
self.params['selector'] = selector
@property
def selector(self):
return self.params['selector']
def set_parent(self, neuron):
self.parent = neuron
class Synapse(object):
def __init__(self, param_dict):
self.params = param_dict.copy()
self.params.update({'conductance': True})
def link(self, pre_neuron, post_neuron):
self.pre_neuron = pre_neuron
self.post_neuron = post_neuron
self.pre_neuron.add_outgoing_synapse(self)
self.post_neuron.add_incoming_synapse(self)
self.update_class(self.get_class(self.pre_neuron, self.post_neuron))
def replace_pre(self, pre_neuron):
self.pre_neuron = pre_neuron
self.pre_neuron.add_outgoing_synapse(self)
self.params['prename'] = pre_neuron.name
def replace_post(self, post_neuron):
self.post_neuron = post_neuron
self.post_neuron.add_incoming_synapse(self)
self.params['postname'] = post_neuron.name
def __repr__(self):
return ('synapse from '+self.params['prename']+' to ' + self.params['postname']
+ ': '+str(self.params))
def __str__(self):
return 'synapse '+str(self.params['prename'])+' to '+self.params['postname']
def process_before_export(self):
if 'cart' in self.params.keys():
del self.params['cart']
if 'scale' in self.params.keys():
self.params['slope'] *= self.params['scale']
self.params['saturation'] *= self.params['scale']
del self.params['scale']
self.params['model'] = str(self.params['model'])
@staticmethod
def get_class(preneuron, postneuron):
""" preneuron: Neuron instance
postneuron: Neuron instance
"""
is_pre_spk = preneuron.params['spiking']
is_post_spk = postneuron.params['spiking']
if is_pre_spk and is_post_spk:
return 0
elif is_pre_spk and not is_post_spk:
return 1
elif not is_pre_spk and is_post_spk:
return 2
elif not is_pre_spk and not is_post_spk:
return 3
def update_class(self, cls):
self.params.update({'class': cls})
@property
def prename(self):
return self.params['prename']
@property
def postname(self):
return self.params['postname']
def create_pattern(n_dict_1, n_dict_2, save_as=None):
"""
If `save_as` is not None, save the pattern as the specified file name.
"""
lpu1_sel_in_gpot = plsel.Selector(LPU.extract_in_gpot(n_dict_1))
lpu1_sel_out_gpot = plsel.Selector(LPU.extract_out_gpot(n_dict_1))
lpu2_sel_in_gpot = plsel.Selector(LPU.extract_in_gpot(n_dict_2))
lpu2_sel_out_gpot = plsel.Selector(LPU.extract_out_gpot(n_dict_2))
lpu1_sel_in_spike = plsel.Selector(LPU.extract_in_spk(n_dict_1))
lpu1_sel_out_spike = plsel.Selector(LPU.extract_out_spk(n_dict_1))
lpu2_sel_in_spike = plsel.Selector(LPU.extract_in_spk(n_dict_2))
lpu2_sel_out_spike = plsel.Selector(LPU.extract_out_spk(n_dict_2))
lpu1_sel_out = plsel.Selector.union(lpu1_sel_out_gpot, lpu1_sel_out_spike)
lpu2_sel_out = plsel.Selector.union(lpu2_sel_out_gpot, lpu2_sel_out_spike)
lpu1_sel_in = plsel.Selector.union(lpu1_sel_in_gpot, lpu1_sel_in_spike)
lpu2_sel_in = plsel.Selector.union(lpu2_sel_in_gpot, lpu2_sel_in_spike)
lpu1_sel = plsel.Selector.union(lpu1_sel_out, lpu1_sel_in)
lpu2_sel = plsel.Selector.union(lpu2_sel_out, lpu2_sel_in)
pat = Pattern(lpu1_sel, lpu2_sel)
pat.interface[lpu1_sel_in_gpot, 'io', 'type'] = ['in', 'gpot']
pat.interface[lpu1_sel_out_gpot, 'io', 'type'] = ['out', 'gpot']
pat.interface[lpu2_sel_in_gpot, 'io', 'type'] = ['in', 'gpot']
pat.interface[lpu2_sel_out_gpot, 'io', 'type'] = ['out', 'gpot']
pat.interface[lpu1_sel_in_spike, 'io', 'type'] = ['in', 'spike']
pat.interface[lpu1_sel_out_spike, 'io', 'type'] = ['out', 'spike']
pat.interface[lpu2_sel_in_spike, 'io', 'type'] = ['in', 'spike']
pat.interface[lpu2_sel_out_spike, 'io', 'type'] = ['out', 'spike']
Neuron_list_12 = ['L1', 'L2', 'L3', 'L4', 'L5', 'T1']
Neuron_list_21 = ['C2', 'C3']
for i in range(768):
for neuron in Neuron_list_12:
pat['/lamina/cart'+str(i)+'/'+neuron, '/medulla/cart'+str(i)+'/'+neuron] = 1
for neuron in Neuron_list_21:
pat['/medulla/cart'+str(i)+'/'+neuron, '/lamina/cart'+str(i)+'/'+neuron] = 1
if save_as:
with open(save_as, 'wb') as pat_file:
pickle.dump(pat, pat_file)
return pat
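# Illustrative call of create_pattern; the GEXF filenames and the use of
# LPU.lpu_parser to obtain n_dict_1/n_dict_2 are assumptions about the
# surrounding workflow, not part of this module:
#
#   (n_dict_lam, s_dict_lam) = LPU.lpu_parser('lamina.gexf.gz')
#   (n_dict_med, s_dict_med) = LPU.lpu_parser('medulla.gexf.gz')
#   pat = create_pattern(n_dict_lam, n_dict_med, save_as='lam_med.pattern.pickle')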
def append_field(rec, name, arr, dtype=None):
arr = np.asarray(arr)
if dtype is None:
dtype = arr.dtype
newdtype = np.dtype(rec.dtype.descr + [(name, dtype)])
newrec = np.empty(rec.shape, dtype=newdtype)
for field in rec.dtype.fields:
newrec[field] = rec[field]
newrec[name] = arr
return newrec
|
cerrno/neurokernel
|
examples/vision/data/vision_configuration.py
|
Python
|
bsd-3-clause
| 33,107
|
[
"NEURON"
] |
feb26236747f7cb303e0e07a835d4dce2b0eca38a66dc4784b798dcf8a446a90
|
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import scipy as sp
import matplotlib.text as mpl_text
import traits.api as t
import traitsui.api as tu
from traitsui.menu import OKButton, CancelButton
from pyface.message_dialog import information
from hyperspy import components1d
from hyperspy.component import Component
from hyperspy import drawing
from hyperspy.gui.tools import (SpanSelectorInSignal1D,
SpanSelectorInSignal1DHandler,
OurOKButton,
OurFindButton,
OurPreviousButton,
OurApplyButton)
import hyperspy.gui.messages as messages
class BackgroundRemoval(SpanSelectorInSignal1D):
background_type = t.Enum(
'Power Law',
'Gaussian',
'Offset',
'Polynomial',
default='Power Law')
polynomial_order = t.Range(1, 10)
fast = t.Bool(True,
desc=("Perform a fast (analytic, but possibly less accurate)"
" estimation of the background. Otherwise use "
"non-linear least squares."))
background_estimator = t.Instance(Component)
bg_line_range = t.Enum('from_left_range',
'full',
'ss_range',
default='full')
hi = t.Int(0)
view = tu.View(
tu.Group(
'background_type',
'fast',
tu.Group(
'polynomial_order',
visible_when='background_type == \'Polynomial\''), ),
buttons=[OKButton, CancelButton],
handler=SpanSelectorInSignal1DHandler,
title='Background removal tool',
resizable=True,
width=300,
)
def __init__(self, signal):
super(BackgroundRemoval, self).__init__(signal)
self.set_background_estimator()
self.bg_line = None
def on_disabling_span_selector(self):
if self.bg_line is not None:
self.bg_line.close()
self.bg_line = None
def set_background_estimator(self):
if self.background_type == 'Power Law':
self.background_estimator = components1d.PowerLaw()
self.bg_line_range = 'from_left_range'
elif self.background_type == 'Gaussian':
self.background_estimator = components1d.Gaussian()
self.bg_line_range = 'full'
elif self.background_type == 'Offset':
self.background_estimator = components1d.Offset()
self.bg_line_range = 'full'
elif self.background_type == 'Polynomial':
self.background_estimator = components1d.Polynomial(
self.polynomial_order)
self.bg_line_range = 'full'
def _polynomial_order_changed(self, old, new):
self.background_estimator = components1d.Polynomial(new)
self.span_selector_changed()
def _background_type_changed(self, old, new):
self.set_background_estimator()
self.span_selector_changed()
def _ss_left_value_changed(self, old, new):
if not (np.isnan(self.ss_right_value) or np.isnan(self.ss_left_value)):
self.span_selector_changed()
def _ss_right_value_changed(self, old, new):
if not (np.isnan(self.ss_right_value) or np.isnan(self.ss_left_value)):
self.span_selector_changed()
def create_background_line(self):
self.bg_line = drawing.signal1d.Signal1DLine()
self.bg_line.data_function = self.bg_to_plot
self.bg_line.set_line_properties(
color='blue',
type='line',
scaley=False)
self.signal._plot.signal_plot.add_line(self.bg_line)
self.bg_line.autoscale = False
self.bg_line.plot()
def bg_to_plot(self, axes_manager=None, fill_with=np.nan):
# First try to update the estimation
self.background_estimator.estimate_parameters(
self.signal, self.ss_left_value, self.ss_right_value,
only_current=True)
if self.bg_line_range == 'from_left_range':
bg_array = np.zeros(self.axis.axis.shape)
bg_array[:] = fill_with
from_index = self.axis.value2index(self.ss_left_value)
bg_array[from_index:] = self.background_estimator.function(
self.axis.axis[from_index:])
to_return = bg_array
elif self.bg_line_range == 'full':
to_return = self.background_estimator.function(self.axis.axis)
elif self.bg_line_range == 'ss_range':
bg_array = np.zeros(self.axis.axis.shape)
bg_array[:] = fill_with
from_index = self.axis.value2index(self.ss_left_value)
to_index = self.axis.value2index(self.ss_right_value)
            bg_array[from_index:to_index] = self.background_estimator.function(
                self.axis.axis[from_index:to_index])
to_return = bg_array
if self.signal.metadata.Signal.binned is True:
to_return *= self.axis.scale
return to_return
def span_selector_changed(self):
        if np.isnan(self.ss_left_value) or np.isnan(self.ss_right_value) or \
                self.ss_right_value <= self.ss_left_value:
return
if self.background_estimator is None:
return
if self.bg_line is None and \
self.background_estimator.estimate_parameters(
self.signal, self.ss_left_value,
self.ss_right_value,
only_current=True) is True:
self.create_background_line()
else:
self.bg_line.update()
def apply(self):
self.signal._plot.auto_update_plot = False
new_spectra = self.signal._remove_background_cli(
(self.ss_left_value, self.ss_right_value),
self.background_estimator, fast=self.fast)
self.signal.data = new_spectra.data
self.signal._replot()
self.signal._plot.auto_update_plot = True
class SpikesRemovalHandler(tu.Handler):
def close(self, info, is_ok):
# Removes the span selector from the plot
info.object.span_selector_switch(False)
return True
def apply(self, info, *args, **kwargs):
"""Handles the **Apply** button being clicked.
"""
obj = info.object
obj.is_ok = True
if hasattr(obj, 'apply'):
obj.apply()
return
def find(self, info, *args, **kwargs):
"""Handles the **Next** button being clicked.
"""
obj = info.object
obj.is_ok = True
if hasattr(obj, 'find'):
obj.find()
return
def back(self, info, *args, **kwargs):
"""Handles the **Next** button being clicked.
"""
obj = info.object
obj.is_ok = True
if hasattr(obj, 'find'):
obj.find(back=True)
return
class SpikesRemoval(SpanSelectorInSignal1D):
interpolator_kind = t.Enum(
'Linear',
'Spline',
default='Linear',
desc="the type of interpolation to use when\n"
"replacing the signal where a spike has been replaced")
threshold = t.Float(desc="the derivative magnitude threshold above\n"
"which to find spikes")
click_to_show_instructions = t.Button()
show_derivative_histogram = t.Button()
spline_order = t.Range(1, 10, 3,
desc="the order of the spline used to\n"
"connect the reconstructed data")
interpolator = None
default_spike_width = t.Int(5,
desc="the width over which to do the interpolation\n"
"when removing a spike (this can be "
"adjusted for each\nspike by clicking "
"and dragging on the display during\n"
"spike replacement)")
index = t.Int(0)
add_noise = t.Bool(True,
desc="whether to add noise to the interpolated\nportion"
"of the spectrum. The noise properties defined\n"
"in the Signal metadata are used if present,"
"otherwise\nshot noise is used as a default")
thisOKButton = tu.Action(name="OK",
action="OK",
tooltip="Close the spikes removal tool")
thisApplyButton = tu.Action(name="Remove spike",
action="apply",
tooltip="Remove the current spike by "
"interpolating\n"
"with the specified settings (and find\n"
"the next spike automatically)")
thisFindButton = tu.Action(name="Find next",
action="find",
tooltip="Find the next (in terms of navigation\n"
"dimensions) spike in the data.")
thisPreviousButton = tu.Action(name="Find previous",
action="back",
tooltip="Find the previous (in terms of "
"navigation\n"
"dimensions) spike in the data.")
view = tu.View(tu.Group(
tu.Group(
tu.Item('click_to_show_instructions',
show_label=False, ),
tu.Item('show_derivative_histogram',
show_label=False,
tooltip="To determine the appropriate threshold,\n"
"plot the derivative magnitude histogram, \n"
"and look for outliers at high magnitudes \n"
"(which represent sudden spikes in the data)"),
'threshold',
show_border=True,
),
tu.Group(
'add_noise',
'interpolator_kind',
'default_spike_width',
tu.Group(
'spline_order',
enabled_when='interpolator_kind == \'Spline\''),
show_border=True,
label='Advanced settings'),
),
buttons=[thisOKButton,
thisPreviousButton,
thisFindButton,
thisApplyButton, ],
handler=SpikesRemovalHandler,
title='Spikes removal tool',
resizable=False,
)
def __init__(self, signal, navigation_mask=None, signal_mask=None):
super(SpikesRemoval, self).__init__(signal)
self.interpolated_line = None
self.coordinates = [coordinate for coordinate in
signal.axes_manager._am_indices_generator()
if (navigation_mask is None or not
navigation_mask[coordinate[::-1]])]
self.signal = signal
self.line = signal._plot.signal_plot.ax_lines[0]
self.ax = signal._plot.signal_plot.ax
signal._plot.auto_update_plot = False
if len(self.coordinates) > 1:
signal.axes_manager.indices = self.coordinates[0]
self.threshold = 400
self.index = 0
self.argmax = None
self.derivmax = None
self.kind = "linear"
self._temp_mask = np.zeros(self.signal().shape, dtype='bool')
self.signal_mask = signal_mask
self.navigation_mask = navigation_mask
md = self.signal.metadata
from hyperspy.signal import BaseSignal
if "Signal.Noise_properties" in md:
if "Signal.Noise_properties.variance" in md:
self.noise_variance = md.Signal.Noise_properties.variance
if isinstance(md.Signal.Noise_properties.variance, BaseSignal):
self.noise_type = "heteroscedastic"
else:
self.noise_type = "white"
else:
self.noise_type = "shot noise"
def _threshold_changed(self, old, new):
self.index = 0
self.update_plot()
def _click_to_show_instructions_fired(self):
m = information(None,
"\nTo remove spikes from the data:\n\n"
" 1. Click \"Show derivative histogram\" to "
"determine at what magnitude the spikes are present.\n"
" 2. Enter a suitable threshold (lower than the "
"lowest magnitude outlier in the histogram) in the "
"\"Threshold\" box, which will be the magnitude "
"from which to search. \n"
" 3. Click \"Find next\" to find the first spike.\n"
" 4. If desired, the width and position of the "
"boundaries used to replace the spike can be "
"adjusted by clicking and dragging on the displayed "
"plot.\n "
" 5. View the spike (and the replacement data that "
"will be added) and click \"Remove spike\" in order "
"to alter the data as shown. The tool will "
"automatically find the next spike to replace.\n"
" 6. Repeat this process for each spike throughout "
"the dataset, until the end of the dataset is "
"reached.\n"
" 7. Click \"OK\" when finished to close the spikes "
"removal tool.\n\n"
"Note: Various settings can be configured in "
"the \"Advanced settings\" section. Hover the "
"mouse over each parameter for a description of what "
"it does."
"\n",
title="Instructions"),
def _show_derivative_histogram_fired(self):
self.signal._spikes_diagnosis(signal_mask=self.signal_mask,
navigation_mask=self.navigation_mask)
def detect_spike(self):
derivative = np.diff(self.signal())
if self.signal_mask is not None:
derivative[self.signal_mask[:-1]] = 0
if self.argmax is not None:
left, right = self.get_interpolation_range()
self._temp_mask[left:right] = True
derivative[self._temp_mask[:-1]] = 0
if abs(derivative.max()) >= self.threshold:
self.argmax = derivative.argmax()
self.derivmax = abs(derivative.max())
return True
else:
return False
def _reset_line(self):
if self.interpolated_line is not None:
self.interpolated_line.close()
self.interpolated_line = None
self.reset_span_selector()
def find(self, back=False):
self._reset_line()
ncoordinates = len(self.coordinates)
spike = self.detect_spike()
while not spike and (
(self.index < ncoordinates - 1 and back is False) or
(self.index > 0 and back is True)):
if back is False:
self.index += 1
else:
self.index -= 1
spike = self.detect_spike()
if spike is False:
messages.information('End of dataset reached')
self.index = 0
self._reset_line()
return
else:
minimum = max(0, self.argmax - 50)
maximum = min(len(self.signal()) - 1, self.argmax + 50)
thresh_label = DerivativeTextParameters(
text="$\mathsf{\delta}_\mathsf{max}=$",
color="black")
self.ax.legend([thresh_label], [repr(int(self.derivmax))],
handler_map={DerivativeTextParameters:
DerivativeTextHandler()},
loc='best')
self.ax.set_xlim(
self.signal.axes_manager.signal_axes[0].index2value(
minimum),
self.signal.axes_manager.signal_axes[0].index2value(
maximum))
self.update_plot()
self.create_interpolation_line()
def update_plot(self):
if self.interpolated_line is not None:
self.interpolated_line.close()
self.interpolated_line = None
self.reset_span_selector()
self.update_spectrum_line()
if len(self.coordinates) > 1:
self.signal._plot.pointer._update_patch_position()
def update_spectrum_line(self):
self.line.auto_update = True
self.line.update()
self.line.auto_update = False
def _index_changed(self, old, new):
self.signal.axes_manager.indices = self.coordinates[new]
self.argmax = None
self._temp_mask[:] = False
def on_disabling_span_selector(self):
if self.interpolated_line is not None:
self.interpolated_line.close()
self.interpolated_line = None
def _spline_order_changed(self, old, new):
self.kind = self.spline_order
self.span_selector_changed()
def _add_noise_changed(self, old, new):
self.span_selector_changed()
def _interpolator_kind_changed(self, old, new):
        if new == 'Linear':
            self.kind = 'linear'
else:
self.kind = self.spline_order
self.span_selector_changed()
def _ss_left_value_changed(self, old, new):
if not (np.isnan(self.ss_right_value) or np.isnan(self.ss_left_value)):
self.span_selector_changed()
def _ss_right_value_changed(self, old, new):
if not (np.isnan(self.ss_right_value) or np.isnan(self.ss_left_value)):
self.span_selector_changed()
def create_interpolation_line(self):
self.interpolated_line = drawing.signal1d.Signal1DLine()
self.interpolated_line.data_function = self.get_interpolated_spectrum
self.interpolated_line.set_line_properties(
color='blue',
type='line')
self.signal._plot.signal_plot.add_line(self.interpolated_line)
self.interpolated_line.autoscale = False
self.interpolated_line.plot()
def get_interpolation_range(self):
axis = self.signal.axes_manager.signal_axes[0]
if np.isnan(self.ss_left_value) or np.isnan(self.ss_right_value):
left = self.argmax - self.default_spike_width
right = self.argmax + self.default_spike_width
else:
left = axis.value2index(self.ss_left_value)
right = axis.value2index(self.ss_right_value)
# Clip to the axis dimensions
nchannels = self.signal.axes_manager.signal_shape[0]
left = left if left >= 0 else 0
right = right if right < nchannels else nchannels - 1
return left, right
def get_interpolated_spectrum(self, axes_manager=None):
data = self.signal().copy()
axis = self.signal.axes_manager.signal_axes[0]
left, right = self.get_interpolation_range()
if self.kind == 'linear':
pad = 1
else:
pad = 10
ileft = left - pad
iright = right + pad
ileft = np.clip(ileft, 0, len(data))
iright = np.clip(iright, 0, len(data))
left = int(np.clip(left, 0, len(data)))
right = int(np.clip(right, 0, len(data)))
x = np.hstack((axis.axis[ileft:left], axis.axis[right:iright]))
y = np.hstack((data[ileft:left], data[right:iright]))
if ileft == 0:
# Extrapolate to the left
data[left:right] = data[right + 1]
elif iright == (len(data) - 1):
# Extrapolate to the right
data[left:right] = data[left - 1]
else:
# Interpolate
intp = sp.interpolate.interp1d(x, y, kind=self.kind)
data[left:right] = intp(axis.axis[left:right])
# Add noise
if self.add_noise is True:
if self.noise_type == "white":
data[left:right] += np.random.normal(
scale=np.sqrt(self.noise_variance),
size=right - left)
elif self.noise_type == "heteroscedastic":
noise_variance = self.noise_variance(
axes_manager=self.signal.axes_manager)[left:right]
noise = [np.random.normal(scale=np.sqrt(item))
for item in noise_variance]
data[left:right] += noise
else:
data[left:right] = np.random.poisson(
np.clip(data[left:right], 0, np.inf))
return data
def span_selector_changed(self):
if self.interpolated_line is None:
return
else:
self.interpolated_line.update()
def apply(self):
self.signal()[:] = self.get_interpolated_spectrum()
self.signal.events.data_changed.trigger(obj=self.signal)
self.update_spectrum_line()
self.interpolated_line.close()
self.interpolated_line = None
self.reset_span_selector()
self.find()
# For creating a text handler in legend (to label derivative magnitude)
class DerivativeTextParameters(object):
def __init__(self, text, color):
self.my_text = text
self.my_color = color
class DerivativeTextHandler(object):
def legend_artist(self, legend, orig_handle, fontsize, handlebox):
x0, y0 = handlebox.xdescent, handlebox.ydescent
width, height = handlebox.width, handlebox.height
patch = mpl_text.Text(
text=orig_handle.my_text,
color=orig_handle.my_color)
handlebox.add_artist(patch)
return patch
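# Typical entry points (illustrative; assumes the usual hyperspy signal API around
# this module, and 'spectrum.hspy' is only a placeholder filename):
#
#   import hyperspy.api as hs
#   s = hs.load('spectrum.hspy')
#   s.remove_background()        # builds a BackgroundRemoval tool like the one above
#   s.spikes_removal_tool()      # builds a SpikesRemoval tool like the one above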
|
vidartf/hyperspy
|
hyperspy/gui/egerton_quantification.py
|
Python
|
gpl-3.0
| 22,660
|
[
"Gaussian"
] |
fca77ab39ac6b50f32a7caec36633019a2d6278065df7df82a02fe4ea7ab96da
|
##
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Quantum ESPRESSO, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import fileinput
import os
import re
import shutil
import sys
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
class EB_QuantumESPRESSO(ConfigureMake):
"""Support for building and installing Quantum ESPRESSO."""
@staticmethod
def extra_options():
"""Custom easyconfig parameters for Quantum ESPRESSO."""
extra_vars = {
'hybrid': [False, "Enable hybrid build (with OpenMP)", CUSTOM],
'with_scalapack': [True, "Enable ScaLAPACK support", CUSTOM],
}
return ConfigureMake.extra_options(extra_vars)
def __init__(self, *args, **kwargs):
"""Add extra config options specific to Quantum ESPRESSO."""
super(EB_QuantumESPRESSO, self).__init__(*args, **kwargs)
self.build_in_installdir = True
self.install_subdir = "espresso-%s" % self.version
def patch_step(self):
"""Patch files from build dir (not start dir)."""
super(EB_QuantumESPRESSO, self).patch_step(beginpath=self.builddir)
def configure_step(self):
"""Custom configuration procedure for Quantum ESPRESSO."""
if self.cfg['hybrid']:
self.cfg.update('configopts', '--enable-openmp')
if not self.toolchain.options.get('usempi', None):
self.cfg.update('configopts', '--disable-parallel')
if not self.cfg['with_scalapack']:
self.cfg.update('configopts', '--without-scalapack')
repls = []
if self.toolchain.comp_family() in [toolchain.INTELCOMP]:
# set preprocessor command (-E to stop after preprocessing, -C to preserve comments)
cpp = "%s -E -C" % os.getenv('CC')
repls.append(('CPP', cpp, False))
env.setvar('CPP', cpp)
# also define $FCCPP, but do *not* include -C (comments should not be preserved when preprocessing Fortran)
env.setvar('FCCPP', "%s -E" % os.getenv('CC'))
super(EB_QuantumESPRESSO, self).configure_step()
# compose list of DFLAGS (flag, value, keep_stuff)
# for guidelines, see include/defs.h.README in sources
dflags = []
comp_fam_dflags = {
toolchain.INTELCOMP: '-D__INTEL',
toolchain.GCC: '-D__GFORTRAN -D__STD_F95',
}
dflags.append(comp_fam_dflags[self.toolchain.comp_family()])
libfft = os.getenv('LIBFFT')
if libfft:
if "fftw3" in libfft:
dflags.append('-D__FFTW3')
else:
dflags.append('-D__FFTW')
env.setvar('FFTW_LIBS', libfft)
if get_software_root('ACML'):
dflags.append('-D__ACML')
if self.toolchain.options.get('usempi', None):
dflags.append('-D__MPI -D__PARA')
if self.cfg['hybrid']:
dflags.append(" -D__OPENMP")
if self.cfg['with_scalapack']:
dflags.append(" -D__SCALAPACK")
        # always include -w to suppress warnings
dflags.append('-w')
repls.append(('DFLAGS', ' '.join(dflags), False))
# complete C/Fortran compiler and LD flags
if self.cfg['hybrid']:
repls.append(('LDFLAGS', self.toolchain.get_flag('openmp'), True))
repls.append(('(?:C|F90|F)FLAGS', self.toolchain.get_flag('openmp'), True))
# obtain library settings
libs = []
for lib in ['BLAS', 'LAPACK', 'FFT', 'SCALAPACK']:
val = os.getenv('LIB%s' % lib)
repls.append(('%s_LIBS' % lib, val, False))
libs.append(val)
libs = ' '.join(libs)
repls.append(('BLAS_LIBS_SWITCH', 'external', False))
repls.append(('LAPACK_LIBS_SWITCH', 'external', False))
repls.append(('LD_LIBS', os.getenv('LIBS'), False))
self.log.debug("List of replacements to perform: %s" % repls)
# patch make.sys file
fn = os.path.join(self.cfg['start_dir'], 'make.sys')
try:
for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
for (k, v, keep) in repls:
# need to use [ \t]* instead of \s*, because vars may be undefined as empty,
# and we don't want to include newlines
if keep:
line = re.sub(r"^(%s\s*=[ \t]*)(.*)$" % k, r"\1\2 %s" % v, line)
else:
line = re.sub(r"^(%s\s*=[ \t]*).*$" % k, r"\1%s" % v, line)
# fix preprocessing directives for .f90 files in make.sys if required
if self.toolchain.comp_family() in [toolchain.GCC]:
line = re.sub(r"\$\(MPIF90\) \$\(F90FLAGS\) -c \$<",
"$(CPP) -C $(CPPFLAGS) $< -o $*.F90\n" +
"\t$(MPIF90) $(F90FLAGS) -c $*.F90 -o $*.o",
line)
sys.stdout.write(line)
except IOError, err:
raise EasyBuildError("Failed to patch %s: %s", fn, err)
self.log.debug("Contents of patched %s: %s" % (fn, open(fn, "r").read()))
# patch default make.sys for wannier
if LooseVersion(self.version) >= LooseVersion("5"):
fn = os.path.join(self.cfg['start_dir'], 'install', 'make_wannier90.sys')
else:
fn = os.path.join(self.cfg['start_dir'], 'plugins', 'install', 'make_wannier90.sys')
try:
for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
line = re.sub(r"^(LIBS\s*=\s*).*", r"\1%s" % libs, line)
sys.stdout.write(line)
except IOError, err:
raise EasyBuildError("Failed to patch %s: %s", fn, err)
self.log.debug("Contents of patched %s: %s" % (fn, open(fn, "r").read()))
# patch Makefile of want plugin
wantprefix = 'want-'
wantdirs = [d for d in os.listdir(self.builddir) if d.startswith(wantprefix)]
if len(wantdirs) > 1:
raise EasyBuildError("Found more than one directory with %s prefix, help!", wantprefix)
if len(wantdirs) != 0:
wantdir = os.path.join(self.builddir, wantdirs[0])
make_sys_in_path = None
cand_paths = [os.path.join('conf', 'make.sys.in'), os.path.join('config', 'make.sys.in')]
for path in cand_paths:
full_path = os.path.join(wantdir, path)
if os.path.exists(full_path):
make_sys_in_path = full_path
break
if make_sys_in_path is None:
raise EasyBuildError("Failed to find make.sys.in in want directory %s, paths considered: %s",
wantdir, ', '.join(cand_paths))
try:
for line in fileinput.input(make_sys_in_path, inplace=1, backup='.orig.eb'):
# fix preprocessing directives for .f90 files in make.sys if required
if self.toolchain.comp_family() in [toolchain.GCC]:
line = re.sub("@f90rule@",
"$(CPP) -C $(CPPFLAGS) $< -o $*.F90\n" +
"\t$(MPIF90) $(F90FLAGS) -c $*.F90 -o $*.o",
line)
sys.stdout.write(line)
except IOError, err:
raise EasyBuildError("Failed to patch %s: %s", fn, err)
# move non-espresso directories to where they're expected and create symlinks
try:
dirnames = [d for d in os.listdir(self.builddir) if not d.startswith('espresso')]
targetdir = os.path.join(self.builddir, "espresso-%s" % self.version)
for dirname in dirnames:
shutil.move(os.path.join(self.builddir, dirname), os.path.join(targetdir, dirname))
self.log.info("Moved %s into %s" % (dirname, targetdir))
dirname_head = dirname.split('-')[0]
linkname = None
if dirname_head == 'sax':
linkname = 'SaX'
if dirname_head == 'wannier90':
linkname = 'W90'
elif dirname_head in ['gipaw', 'plumed', 'want', 'yambo']:
linkname = dirname_head.upper()
if linkname:
os.symlink(os.path.join(targetdir, dirname), os.path.join(targetdir, linkname))
except OSError, err:
raise EasyBuildError("Failed to move non-espresso directories: %s", err)
def install_step(self):
"""Skip install step, since we're building in the install directory."""
pass
def sanity_check_step(self):
"""Custom sanity check for Quantum ESPRESSO."""
# build list of expected binaries based on make targets
bins = ["iotk", "iotk.x", "iotk_print_kinds.x"]
if 'cp' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
bins.extend(["cp.x", "cppp.x", "wfdd.x"])
if 'gww' in self.cfg['buildopts']: # only for v4.x, not in v5.0 anymore
bins.extend(["gww_fit.x", "gww.x", "head.x", "pw4gww.x"])
if 'ld1' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
bins.extend(["ld1.x"])
if 'gipaw' in self.cfg['buildopts']:
bins.extend(["gipaw.x"])
if 'neb' in self.cfg['buildopts'] or 'pwall' in self.cfg['buildopts'] or \
'all' in self.cfg['buildopts']:
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["neb.x", "path_interpolation.x"])
if 'ph' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
bins.extend(["d3.x", "dynmat.x", "lambda.x", "matdyn.x", "ph.x", "phcg.x", "q2r.x"])
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["fqha.x", "q2qstar.x"])
if 'pp' in self.cfg['buildopts'] or 'pwall' in self.cfg['buildopts'] or \
'all' in self.cfg['buildopts']:
bins.extend(["average.x", "bands.x", "dos.x", "epsilon.x", "initial_state.x",
"plan_avg.x", "plotband.x", "plotproj.x", "plotrho.x", "pmw.x", "pp.x",
"projwfc.x", "sumpdos.x", "pw2wannier90.x", "pw_export.x", "pw2gw.x",
"wannier_ham.x", "wannier_plot.x"])
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["pw2bgw.x", "bgw2pw.x"])
else:
bins.extend(["pw2casino.x"])
if 'pw' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
bins.extend(["dist.x", "ev.x", "kpoints.x", "pw.x", "pwi2xsf.x"])
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["generate_vdW_kernel_table.x"])
else:
bins.extend(["path_int.x"])
if LooseVersion(self.version) < LooseVersion("5.3.0"):
bins.extend(["band_plot.x", "bands_FS.x", "kvecs_FS.x"])
if 'pwcond' in self.cfg['buildopts'] or 'pwall' in self.cfg['buildopts'] or \
'all' in self.cfg['buildopts']:
bins.extend(["pwcond.x"])
if 'tddfpt' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["turbo_lanczos.x", "turbo_spectrum.x"])
upftools = []
if 'upf' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
upftools = ["casino2upf.x", "cpmd2upf.x", "fhi2upf.x", "fpmd2upf.x", "ncpp2upf.x",
"oldcp2upf.x", "read_upf_tofile.x", "rrkj2upf.x", "uspp2upf.x", "vdb2upf.x",
"virtual.x"]
if LooseVersion(self.version) > LooseVersion("5"):
upftools.extend(["interpolate.x", "upf2casino.x"])
if 'vdw' in self.cfg['buildopts']: # only for v4.x, not in v5.0 anymore
bins.extend(["vdw.x"])
if 'w90' in self.cfg['buildopts']:
bins.extend(["wannier90.x"])
want_bins = []
if 'want' in self.cfg['buildopts']:
want_bins = ["bands.x", "blc2wan.x", "conductor.x", "current.x", "disentangle.x",
"dos.x", "gcube2plt.x", "kgrid.x", "midpoint.x", "plot.x", "sumpdos",
"wannier.x", "wfk2etsf.x"]
if LooseVersion(self.version) > LooseVersion("5"):
want_bins.extend(["cmplx_bands.x", "decay.x", "sax2qexml.x", "sum_sgm.x"])
if 'xspectra' in self.cfg['buildopts']:
bins.extend(["xspectra.x"])
yambo_bins = []
if 'yambo' in self.cfg['buildopts']:
yambo_bins = ["a2y", "p2y", "yambo", "ypp"]
pref = self.install_subdir
custom_paths = {
'files': [os.path.join(pref, 'bin', x) for x in bins] +
[os.path.join(pref, 'upftools', x) for x in upftools] +
[os.path.join(pref, 'WANT', 'bin', x) for x in want_bins] +
[os.path.join(pref, 'YAMBO', 'bin', x) for x in yambo_bins],
'dirs': [os.path.join(pref, 'include')]
}
super(EB_QuantumESPRESSO, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""Custom path suggestions for Quantum ESPRESSO."""
guesses = super(EB_QuantumESPRESSO, self).make_module_req_guess()
# order matters here, 'bin' should be *last* in this list to ensure it gets prepended to $PATH last,
# so it gets preference over the others
# this is important since some binaries are available in two places (e.g. dos.x in both bin and WANT/bin)
bindirs = ['upftools', 'WANT/bin', 'YAMBO/bin', 'bin']
guesses.update({
'PATH': [os.path.join(self.install_subdir, bindir) for bindir in bindirs],
'CPATH': [os.path.join(self.install_subdir, 'include')],
})
return guesses
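# Illustrative easyconfig fragment using the custom parameters defined in
# extra_options() above (values are made up and every other required easyconfig
# parameter is omitted):
#
#   hybrid = True            # adds --enable-openmp and -D__OPENMP
#   with_scalapack = False   # adds --without-scalapack and drops -D__SCALAPACK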
|
hpcleuven/easybuild-easyblocks
|
easybuild/easyblocks/q/quantumespresso.py
|
Python
|
gpl-2.0
| 15,489
|
[
"ESPResSo",
"Quantum ESPRESSO",
"Wannier90",
"Yambo"
] |
f5a41e9c1a1ccf6cd3fd212a312f5669a4d9ff99a1cc410efa0f470707d5231f
|
import _ExocortexAlembicPython as alembic
import sys
import argparse
# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
# global variables
show_time = False
show_meta = False
show_size = False
show_vals = False
show_just_obj = False
show_ts = False
obj_filter = None
typ_filter = None
noo_filter = None
not_filter = None
# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
# visit the hierarchy of properties and compounds
def visit_prop(prop, depth):
if prop.isCompound():
print(depth + "compound: " + prop.getName())
for sub_prop in prop.getPropertyNames():
visit_prop(prop.getProperty(sub_prop), depth+" ")
else:
print(depth + "property: \"" + prop.getName() + "\", " + prop.getType())
if show_size or show_vals:
for i in xrange(0, prop.getNbStoredSamples()):
if show_vals:
print(depth + "-> values: " + str(prop.getValues(i)) )
elif show_size:
print(depth + "-> size: " + str(len(prop.getValues(i))) )
# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
# start the visit of the properties
def visit_object(obj):
for prop in obj.getPropertyNames():
visit_prop(obj.getProperty(prop), " ")
# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
# inspect the base of the archive, use the filter to discard objects if necessary
def visit_alembic(abc_archive):
global show_just_obj
global obj_filter
global typ_filter
global noo_filter
global not_filter
global show_ts
if show_time:
print("Time sampling: " + str(abc_archive.getSampleTimes()))
for identifier in abc_archive.getIdentifiers():
if (obj_filter != None and identifier.find(obj_filter) < 0) or (noo_filter != None and identifier.find(noo_filter) >= 0):
continue # pass over this object!
obj = abc_archive.getObject(identifier)
obj_typ = obj.getType()
if (typ_filter != None and obj_typ.find(typ_filter) < 0) or (not_filter != None and obj_typ.find(not_filter) >= 0):
continue # pass over this object because of its type!
print("OBJ: " + identifier + ", " + obj_typ)
if show_meta:
print("-- meta data: " + str(obj.getMetaData()))
if show_ts:
print("-- TS index: " + str(obj.getTsIndex()))
if not show_just_obj:
visit_object(obj)
# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
def main(args):
global show_time
global show_meta
global show_size
global show_vals
global show_just_obj
global obj_filter
global typ_filter
global noo_filter
global not_filter
global show_ts
# parser args
parser = argparse.ArgumentParser(description="Explore the structure of an Alembic file.")
parser.add_argument("abc_in", type=str, metavar="{Alembic file}", help="input Alembic file to explore")
parser.add_argument("-v", "--vals", action='store_true', help='show the values of the properties')
parser.add_argument("-s", "--size", action='store_true', help='show only the number of values stored in the properties')
parser.add_argument("-m", "--meta", action='store_true', help='show objects\' meta data')
parser.add_argument("-t", "--time", action='store_true', help='show time sampling')
parser.add_argument("-O", "--object", action='store_true', help='show only objects, not properties')
parser.add_argument("-f", "--filter", type=str, metavar="{id filter}", help="only show objects containing substring {id filter} in their identifier")
parser.add_argument("-T", "--typefilter", type=str, metavar="{type filter}", help="only show objects containing substring {type filter} in their type")
parser.add_argument("-nf", "--NOTfilter", type=str, metavar="{id filter}", help="only copy objects NOT containing substring {id filter} in their identifier")
parser.add_argument("-nT", "--NOTtypefilter", type=str, metavar="{type filter}", help="only copy objects NOT containing substring {type filter} in their type")
parser.add_argument("-S", "--samp", action='store_true', help="show object's time sampling index")
ns = vars(parser.parse_args(args[1:]))
show_time = ns["time"]
show_meta = ns["meta"]
show_size = ns["size"]
show_vals = ns["vals"]
show_ts = ns["samp"]
obj_filter = ns["filter"]
typ_filter = ns["typefilter"]
noo_filter = ns["NOTfilter"]
not_filter = ns["NOTtypefilter"]
show_just_obj = ns["object"]
abc_archive = alembic.getIArchive(ns["abc_in"])
print("\n\nExploring " + ns["abc_in"])
visit_alembic(abc_archive)
# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
if __name__ == "__main__":
main(sys.argv)
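# Illustrative invocation only (the file name is hypothetical), combining the argparse flags
# defined above: show meta data and time sampling, and restrict output to PolyMesh objects
# whose identifier contains "body":
#   python viewABC.py scene.abc -m -t -f body -T PolyMesh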
|
SqueezeStudioAnimation/ExocortexCrate
|
Python/samples/viewABC.py
|
Python
|
bsd-3-clause
| 5,186
|
[
"VisIt"
] |
49e7ca8a4e4776bebe095a9a98b6f1a3e6b4060ecbedd699ef2595b1cdfc9ab0
|
#!/usr/bin/env python
# Author: Junjun Zhang
import sys
import os
import re
import glob
import xmltodict
import json
import yaml
import copy
import logging
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
from elasticsearch1 import Elasticsearch
from collections import OrderedDict
import datetime
import dateutil.parser
from itertools import izip
from distutils.version import LooseVersion
import csv
import hashlib
logger = logging.getLogger('gnos parser')
# create console handler with a higher log level
ch = logging.StreamHandler()
latest_release = 'may2016'
previous_releases = ['mar2016','oct2015', 'aug2015', 'santa_cruz']
def init_es(es_host, es_index):
es = Elasticsearch([ es_host ], http_auth=('elastic', 'changeme'), timeout=600)
#es.indices.delete( es_index, ignore=[400, 404] )
es.indices.create( es_index, ignore=400 )
# create mappings
es_mapping = open('pancan.donor.mapping.json')
es.indices.put_mapping(index=es_index, doc_type='donor', body=es_mapping.read())
es_mapping.close()
es_mapping = open('pancan.file.mapping.json')
es.indices.put_mapping(index=es_index, doc_type='bam_file', body=es_mapping.read())
es_mapping.close()
return es
def process_gnos_analysis(gnos_analysis, donors, vcf_entries, es_index, es, bam_output_fh, annotations, consensus_entries):
analysis_attrib = get_analysis_attrib(gnos_analysis)
if analysis_attrib and analysis_attrib.get('variant_workflow_name'): # variant call gnos entry
donor_unique_id = analysis_attrib.get('dcc_project_code') + '::' + analysis_attrib.get('submitter_donor_id')
if is_in_donor_blacklist(donor_unique_id):
logger.warning('ignore blacklisted donor: {} GNOS entry: {}'
.format(donor_unique_id, gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
if gnos_analysis.get('study').lower().endswith('_test'):
logger.warning('ignore variant calling entry with study ending with _test, donor: {} GNOS entry: {}'
.format(donor_unique_id, gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
#logger.info('Create variant calling file for donor: {}, from entry {}'
# .format(donor_unique_id, gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull')))
vcf_file = create_vcf_entry(donor_unique_id, analysis_attrib, gnos_analysis, annotations)
if not vcf_entries.get(donor_unique_id):
vcf_entries[donor_unique_id] = {}
vcf_entries[donor_unique_id]['vcf_entry_files'] = []
if not consensus_entries.get(donor_unique_id):
consensus_entries[donor_unique_id] = {}
consensus_entries[donor_unique_id]['consensus_entry_files'] = []
if vcf_file.get('vcf_workflow_type') in ['snv_mnv', 'indel', 'cnv', 'sv']:
consensus_entries.get(donor_unique_id)['consensus_entry_files'].append(copy.deepcopy(vcf_file))
else:
vcf_entries.get(donor_unique_id)['vcf_entry_files'].append(copy.deepcopy(vcf_file))
else: # BAM entry
if gnos_analysis.get('dcc_project_code') and gnos_analysis.get('dcc_project_code').upper() == 'TEST':
logger.warning('ignore entry with dcc_project_code being TEST, GNOS entry: {}'
.format(gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
# if gnos_analysis.get('library_strategy') and gnos_analysis.get('library_strategy') == 'RNA-Seq':
# logger.warning('ignore entry with library_strategy being RNA-Seq for now, GNOS entry: {}'
# .format(gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
# return
if not gnos_analysis.get('aliquot_id'):
            logger.warning('ignore entry that does not have aliquot_id, GNOS entry: {}'
.format(gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
if is_in_aliquot_blacklist(gnos_analysis.get('aliquot_id'), annotations):
logger.warning('ignore blacklisted aliquot: {} GNOS entry: {}'
.format(gnos_analysis.get('aliquot_id'), gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
if gnos_analysis.get('refassem_short_name') != 'unaligned' and gnos_analysis.get('refassem_short_name') != 'GRCh37':
logger.warning('ignore entry that is aligned but not aligned to GRCh37: {}'
.format(gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
            return # completely ignore test gnos entries for now, this is the quickest way to keep test data from interfering with real data
if not analysis_attrib:
            logger.warning('ignore entry that does not have ANALYSIS information, GNOS entry: {}'
.format(gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
if not analysis_attrib.get('dcc_project_code') or not analysis_attrib.get('submitter_donor_id') \
or '/' in analysis_attrib.get('submitter_donor_id'):
            logger.warning('ignore entry that does not have dcc_project_code or submitter_donor_id, or whose submitter_donor_id is invalid, GNOS entry: {}'
.format(gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
if not analysis_attrib.get('submitter_specimen_id') or not analysis_attrib.get('submitter_sample_id'):
            logger.warning('ignore entry that does not have submitter_specimen_id or submitter_sample_id, GNOS entry: {}'
.format(gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
# added on Apr. 24, 2015 after discovering that one RNA-Seq uploaded to GNOS with TCGA barcode which was treated as a new donor
if analysis_attrib.get('dcc_project_code').endswith('-US') and \
analysis_attrib.get('submitter_donor_id').startswith('TCGA-'):
logger.warning('ignore TCGA entry submitted with barcode, GNOS entry: {}'
.format(gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
donor_unique_id = analysis_attrib.get('dcc_project_code') + '::' + analysis_attrib.get('submitter_donor_id')
#disable this check for specimen and sample for now as we are still fixing these IDs
#for id_type in ['donor', 'specimen', 'sample']:
for id_type in ['donor']:
if not is_in_pcawg_final_list(analysis_attrib.get('dcc_project_code'), analysis_attrib.get('submitter_'+id_type+'_id'), id_type, annotations):
logger.warning('ignore non-pcawg final list {}: {} GNOS entry: {}'
.format(id_type, analysis_attrib.get('dcc_project_code')+'::'+analysis_attrib.get('submitter_'+id_type+'_id'), gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
if is_in_donor_blacklist(donor_unique_id):
logger.warning('ignore blacklisted donor: {} GNOS entry: {}'
.format(donor_unique_id, gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
if is_test(analysis_attrib, gnos_analysis):
logger.warning('ignore test entry: {}'.format(gnos_analysis.get('analysis_detail_uri')))
            return # completely ignore test gnos entries for now, this is the quickest way to keep test data from interfering with real data
if gnos_analysis['analysis_xml']['ANALYSIS_SET']['ANALYSIS'].get('TITLE') and gnos_analysis['analysis_xml']['ANALYSIS_SET']['ANALYSIS']['TITLE'].startswith('TCGA/ICGC PanCancer Specimen-Level Germline Variant Calling for Specimen'):
logger.warning('ignore Annai germline call entry: {}'.format(gnos_analysis.get('analysis_detail_uri')))
return
if gnos_analysis.get('library_strategy') == 'RNA-Seq' and not analysis_attrib.get('workflow_name') in ('RNA-Seq_Alignment_SOP_STAR', 'Workflow_Bundle_TOPHAT2'):
logger.warning('ignore RNA-Seq entry that is not STAR or TOPHAT2 aligned, entry: {}'.format(gnos_analysis.get('analysis_detail_uri')))
return
if (gnos_analysis.get('library_strategy') == 'WGS' and gnos_analysis.get('refassem_short_name') != 'unaligned'
and not is_train_2_aligned(analysis_attrib, gnos_analysis)
):
# TODO: we may create another ES index for obsoleted BAM entries
            # TODO: we will need more sophisticated checks for handling BAMs that are flagged as aligned but
            # treated as unaligned (this is actually the case for TCGA input BAM entries, maybe need a full
            # TCGA specimen list from Marc?)
logger.warning('ignore entry that is aligned but not by train 2 protocol: {}'
.format( gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
if gnos_analysis.get('library_strategy') == 'WGS' and is_corrupted_train_2_alignment(analysis_attrib, gnos_analysis):
logger.warning('ignore entry that is aligned by train 2 protocol but seems corrupted: {}'
.format( gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
# temporary hack here to skip any VALIDATION entries
if gnos_analysis.get('library_strategy') == 'VALIDATION':
logger.warning('ignore entry that is VALIDATION: {}'
.format( gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
#TODO: put things above into one function
        # temporary hack here to skip any BAM entries from the osdc-tcga repo as it is not supposed to contain
        # any BAM data, but it does, and the aligned BAMs it has overlap with those in CGHub, which causes problems
# if 'osdc-tcga' in gnos_analysis.get('analysis_detail_uri'):
# logger.warning('ignore BAM entry in osdc-tcga repo: {}'
# .format( gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
# return
if not donors.get(donor_unique_id):
            # create a new donor if one does not exist yet
donors[ donor_unique_id ] = create_donor(donor_unique_id, analysis_attrib, gnos_analysis, annotations)
else: # the donor this bam entry belongs to already exists
# perform some comparison between existing donor and the info in the current bam entry
if (donors[donor_unique_id].get('gnos_study') != gnos_analysis.get('study')):
logger.warning( 'existing donor {} has study {}, but study in current gnos ao is {}'.
format( donor_unique_id,
donors[donor_unique_id].get('gnos_study'),
gnos_analysis.get('study') ) )
            # more such checks may be added, no time for this now
#logger.info('Create bam file for donor: {}, from entry {}'
# .format(donor_unique_id, gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull')))
# now parse out gnos analysis object info to build bam_file doc
bam_file = create_bam_file_entry(donor_unique_id, analysis_attrib, gnos_analysis, annotations)
# only do the following when it is WGS
if bam_file.get('library_strategy') == 'WGS' and bam_file.get('bam_type') == 'Specimen level aligned BAM':
if 'normal' in bam_file.get('dcc_specimen_type').lower(): # normal
if donors.get(donor_unique_id).get('normal_specimen'): # normal specimen exists
if donors.get(donor_unique_id).get('normal_specimen').get('aliquot_id') == gnos_analysis.get('aliquot_id'):
if bam_file.get('is_aligned'):
if donors.get(donor_unique_id)['normal_specimen'].get('is_aligned'):
logger.info('more than one normal aligned bam for donor: {}, entry in use: {}, additional entry found in: {}'
.format(donor_unique_id,
donors.get(donor_unique_id).get('normal_specimen').get('gnos_metadata_url'),
gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull')
)
)
if (not donors.get(donor_unique_id).get('normal_specimen').get('gnos_metadata_url').split('/')[-1]
== gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull').split('/')[-1]):
logger.warning('Two aligned BAM entries for the same normal specimen from donor: {} have different GNOS UUIDs: {} and {}'
.format(donor_unique_id,
donors.get(donor_unique_id).get('normal_specimen').get('gnos_metadata_url'),
gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull')
)
)
if donors.get(donor_unique_id).get('normal_specimen').get('upload_date') < bam_file.get(
'upload_date'): # the current one is newer
donors.get(donor_unique_id)['normal_specimen'].update(
prepare_aggregated_specimen_level_info(copy.deepcopy(bam_file))
)
donors.get(donor_unique_id)['gnos_repo'] = bam_file.get('gnos_repo')
else:
donors.get(donor_unique_id)['normal_specimen'].update(
prepare_aggregated_specimen_level_info(copy.deepcopy(bam_file))
)
donors.get(donor_unique_id)['gnos_repo'] = bam_file.get('gnos_repo')
else:
logger.warning('same donor: {} has different aliquot_id: {}, {} for normal specimen, entry in use: {}, additional entry found in {}'
.format(donor_unique_id,
donors.get(donor_unique_id).get('normal_specimen').get('aliquot_id'),
gnos_analysis.get('aliquot_id'),
donors.get(donor_unique_id).get('normal_specimen').get('gnos_metadata_url'),
gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull')
)
)
else:
# add normal_specimen
donors.get(donor_unique_id)['normal_specimen'].update(
prepare_aggregated_specimen_level_info(copy.deepcopy(bam_file))
)
# update donor's 'gnos_repo' field with normal aligned specimen
donors.get(donor_unique_id)['gnos_repo'] = bam_file.get('gnos_repo')
else: # not normal
donors.get(donor_unique_id).get('all_tumor_specimen_aliquots').add(bam_file.get('aliquot_id'))
donors.get(donor_unique_id).get('flags')['all_tumor_specimen_aliquot_counts'] = len(donors.get(donor_unique_id).get('all_tumor_specimen_aliquots'))
if bam_file.get('is_aligned'):
if donors.get(donor_unique_id).get('aligned_tumor_specimens'):
if donors.get(donor_unique_id).get('aligned_tumor_specimen_aliquots').intersection(
[ bam_file.get('aliquot_id') ]
): # multiple alignments for the same tumor aliquot_id
logger.warning('more than one tumor aligned bam for donor: {} with aliquot_id: {}, additional entry found in: {}'
.format(donor_unique_id,
bam_file.get('aliquot_id'),
gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull')
)
)
else:
donors.get(donor_unique_id).get('aligned_tumor_specimens').append( copy.deepcopy(bam_file) )
donors.get(donor_unique_id).get('aligned_tumor_specimen_aliquots').add(bam_file.get('aliquot_id'))
donors.get(donor_unique_id).get('flags')['aligned_tumor_specimen_aliquot_counts'] = len(donors.get(donor_unique_id).get('aligned_tumor_specimen_aliquots'))
else: # create the first element of the list
donors.get(donor_unique_id)['aligned_tumor_specimens'] = [copy.deepcopy(bam_file)]
donors.get(donor_unique_id).get('aligned_tumor_specimen_aliquots').add(bam_file.get('aliquot_id')) # set of aliquot_id
donors.get(donor_unique_id).get('flags')['aligned_tumor_specimen_aliquot_counts'] = 1
donors.get(donor_unique_id).get('flags')['has_aligned_tumor_specimen'] = True
original_gnos = bam_file['gnos_repo']
bam_file.update( donors[ donor_unique_id ] )
bam_file['gnos_repo'] = original_gnos
del bam_file['bam_files']
del bam_file['normal_specimen']
del bam_file['aligned_tumor_specimens']
del bam_file['aligned_tumor_specimen_aliquots']
del bam_file['all_tumor_specimen_aliquots']
del bam_file['flags']
del bam_file['rna_seq']
donors[donor_unique_id]['bam_files'].append( copy.deepcopy(bam_file) )
# push to Elasticsearch
# Let's not worry about this index type, it seems not that useful
#es.index(index=es_index, doc_type='bam_file', id=bam_file['bam_gnos_ao_id'], body=json.loads( json.dumps(bam_file, default=set_default) ), timeout=90)
bam_output_fh.write(json.dumps(bam_file, default=set_default) + '\n')
def is_in_aliquot_blacklist(aliquot_id, annotations):
if annotations.get('aliquot_blacklist') and aliquot_id in annotations.get('aliquot_blacklist'):
return True
else:
return False
def is_in_pcawg_final_list(dcc_project_code, pcawg_id, id_type, annotations):
if annotations.get('pcawg_final_list').get(id_type).intersection([dcc_project_code+'::'+pcawg_id]):
return True
else:
return False
def exist_in_previous_releases(vcf_entry):
for r in previous_releases:
if vcf_entry.get('is_'+r+'_entry'): return True
return False
def choose_consensus_entry(consensus_entries, donor_unique_id):
    # For now this naively appends all entries for the same variant type (snv_mnv/indel/sv/cnv).
    # The assumption: there is no duplicated upload for the same aliquot of the same variant type
if not consensus_entries or not consensus_entries.get(donor_unique_id) or not consensus_entries.get(donor_unique_id).get('consensus_entry_files'):
return
for current_vcf_entry in consensus_entries.get(donor_unique_id).get('consensus_entry_files'):
variant_workflow = current_vcf_entry.get('vcf_workflow_type')
workflow_label = variant_workflow
if not consensus_entries.get(donor_unique_id).get(workflow_label): # new vcf for workflow_type
consensus_entries.get(donor_unique_id)[workflow_label] = []
consensus_entries.get(donor_unique_id)[workflow_label].append(current_vcf_entry)
return consensus_entries
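# A rough sketch (not part of the original pipeline) of what the per-donor structure is
# expected to look like after choose_consensus_entry runs, under the stated assumption of
# one uploaded entry per aliquot and variant type; donor id and entry contents are hypothetical:
#   consensus_entries['PROJ-XX::donor_01'] == {
#       'consensus_entry_files': [<snv_mnv entry>, <indel entry>],
#       'snv_mnv': [<snv_mnv entry>],
#       'indel': [<indel entry>]
#   }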
def choose_vcf_entry(vcf_entries, donor_unique_id, annotations):
if not vcf_entries or not vcf_entries.get(donor_unique_id) or not vcf_entries.get(donor_unique_id).get('vcf_entry_files'):
return
for current_vcf_entry in vcf_entries.get(donor_unique_id).get('vcf_entry_files'):
variant_workflow = current_vcf_entry.get('vcf_workflow_type')
workflow_label = variant_workflow + '_variant_calling'
if not vcf_entries.get(donor_unique_id).get(workflow_label): # new vcf for workflow_type
vcf_entries.get(donor_unique_id).update({workflow_label: current_vcf_entry})
else:
workflow_previous = vcf_entries.get(donor_unique_id).get(workflow_label)
if workflow_previous.get('gnos_id') == current_vcf_entry.get('gnos_id'):
if current_vcf_entry['gnos_repo'][0] in workflow_previous.get('gnos_repo'):
logger.warning( 'Same donor: {} has multiple variant calling with same GNOS ID: {} in the same repo: {}. This should never be possible.'
.format(donor_unique_id, workflow_previous.get('gnos_id'), current_vcf_entry['gnos_repo'][0]))
else:
workflow_previous.get('gnos_repo').append(current_vcf_entry['gnos_repo'][0])
workflow_previous.get('gnos_last_modified').append(current_vcf_entry['gnos_last_modified'][0])
workflow_previous.get('gnos_published_date').append(current_vcf_entry['gnos_published_date'][0])
workflow_previous.get('effective_xml_md5sum').append(current_vcf_entry['effective_xml_md5sum'][0])
workflow_previous['exists_xml_md5sum_mismatch'] = False if len(set(workflow_previous.get('effective_xml_md5sum'))) == 1 else True
logger.info( 'Donor: {} has synchronized variant calling with GNOS ID: {} in the repos: {}'
.format(donor_unique_id, workflow_previous.get('gnos_id'), '|'.join(workflow_previous.get('gnos_repo'))))
else:
if LooseVersion(current_vcf_entry.get('vcf_workflow_result_version')) > LooseVersion(workflow_previous.get('vcf_workflow_result_version')):
vcf_entries.get(donor_unique_id).update({workflow_label: current_vcf_entry})
logger.info(workflow_label+' results for donor: {}. Keep the {} result version: {}, additional {} result version: {}'
.format(donor_unique_id, current_vcf_entry.get('vcf_workflow_result_version'), current_vcf_entry['gnos_id'], workflow_previous.get('vcf_workflow_result_version'), workflow_previous['gnos_id']))
elif LooseVersion(current_vcf_entry.get('vcf_workflow_result_version')) == LooseVersion(workflow_previous.get('vcf_workflow_result_version')):
if not workflow_previous['is_'+latest_release+'_entry']:
if current_vcf_entry['is_'+latest_release+'_entry']:
vcf_entries.get(donor_unique_id).update({workflow_label: current_vcf_entry})
logger.info(workflow_label+' results for donor: {}. Keep the {}_freeze_entry: {}, additional {}'
.format(donor_unique_id, latest_release, current_vcf_entry['gnos_id'], workflow_previous['gnos_id']))
elif not exist_in_previous_releases(current_vcf_entry) and exist_in_previous_releases(workflow_previous):
vcf_entries.get(donor_unique_id).update({workflow_label: current_vcf_entry})
logger.info(workflow_label+' results for donor: {}. Keep the entry not in previous releases: {}, additional {}'
.format(donor_unique_id, current_vcf_entry['gnos_id'], workflow_previous['gnos_id']))
elif exist_in_previous_releases(current_vcf_entry) and not exist_in_previous_releases(workflow_previous):
# no need to replace
logger.warning('{} variant calling result already exist and not in the previous releases for donor: {}, ignoring entry {} in {}'
.format(variant_workflow.upper(), donor_unique_id, current_vcf_entry.get('gnos_id'), current_vcf_entry.get('gnos_repo')[0]))
else:
workflow_version_current = current_vcf_entry.get('workflow_details').get('variant_workflow_version')
workflow_version_previous = workflow_previous.get('workflow_details').get('variant_workflow_version')
gnos_updated_current = current_vcf_entry.get('gnos_last_modified')[0]
gnos_updated_previous = workflow_previous.get('gnos_last_modified')[0]
if LooseVersion(workflow_version_current) > LooseVersion(workflow_version_previous): # current is newer version
logger.info('Newer {} variant calling result with version: {} for donor: {}, with GNOS entry: {} in {} replacing older GNOS entry {} in {}'
.format(variant_workflow.upper(), workflow_version_current, donor_unique_id, \
current_vcf_entry.get('gnos_id'), current_vcf_entry.get('gnos_repo')[0],\
workflow_previous.get('gnos_id'), '|'.join(workflow_previous.get('gnos_repo'))))
vcf_entries.get(donor_unique_id)[workflow_label] = current_vcf_entry
elif LooseVersion(workflow_version_current) == LooseVersion(workflow_version_previous) \
and gnos_updated_current > gnos_updated_previous: # current is newer
logger.info('Newer {} variant calling result with last modified date: {} for donor: {}, with GNOS entry: {} in {} replacing older GNOS entry {} in {}'
.format(variant_workflow.upper(), gnos_updated_current, donor_unique_id, \
current_vcf_entry.get('gnos_id'), current_vcf_entry.get('gnos_repo')[0],\
workflow_previous.get('gnos_id'), '|'.join(workflow_previous.get('gnos_repo'))))
vcf_entries.get(donor_unique_id)[workflow_label] = current_vcf_entry
else: # no need to replace
logger.warning('{} variant calling result already exist and is latest for donor: {}, ignoring entry {} in {}'
.format(variant_workflow.upper(), donor_unique_id, current_vcf_entry.get('gnos_id'), current_vcf_entry.get('gnos_repo')[0]))
else:
# no need to replace
logger.warning('{} variant calling result already exist and in the latest release for donor: {}, ignoring entry {} in {}'
.format(variant_workflow.upper(), donor_unique_id, current_vcf_entry.get('gnos_id'), current_vcf_entry.get('gnos_repo')[0]))
else:
# no need to replace
logger.warning('{} variant calling result already exist and has latest result version for donor: {}, ignoring entry {} in {}'
.format(variant_workflow.upper(), donor_unique_id, current_vcf_entry.get('gnos_id'), current_vcf_entry.get('gnos_repo')[0]))
def create_vcf_entry(donor_unique_id, analysis_attrib, gnos_analysis, annotations):
files = []
if isinstance(gnos_analysis.get('files').get('file'), dict):
file_list = [gnos_analysis.get('files').get('file')]
elif isinstance(gnos_analysis.get('files').get('file'), list):
file_list = gnos_analysis.get('files').get('file')
    else:
        file_list = []
        logger.warning('Variant calling result for donor: {} likely has an incorrectly populated files section, in GNOS entry {}'
            .format(donor_unique_id, gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull')))
for f in file_list:
files.append({'file_name': f.get('filename'), 'file_size': f.get('filesize'), 'file_md5sum': f.get('checksum').get('#text')})
vcf_entry = {
#'analysis_attrib': analysis_attrib, # remove this later
#'gnos_analysis': gnos_analysis, # remove this later
"gnos_id": gnos_analysis.get('analysis_id'),
"gnos_repo": [gnos_analysis.get('analysis_detail_uri').split('/cghub/')[0] + '/'],
"gnos_last_modified": [dateutil.parser.parse(gnos_analysis.get('last_modified'))],
"gnos_published_date": [dateutil.parser.parse(gnos_analysis.get('published_date'))],
"files": files,
"study": gnos_analysis.get('study'),
"effective_xml_md5sum": [gnos_analysis.get('_effective_xml_md5sum')],
"is_santa_cruz_entry": True if gnos_analysis.get('analysis_id') in annotations.get('santa_cruz').get('gnos_id') else False,
"is_aug2015_entry": True if gnos_analysis.get('analysis_id') in annotations.get('aug2015').get('gnos_id') else False,
"is_oct2015_entry": True if gnos_analysis.get('analysis_id') in annotations.get('oct2015').get('gnos_id') else False,
"is_mar2016_entry": True if gnos_analysis.get('analysis_id') in annotations.get('mar2016').get('gnos_id') else False,
"is_may2016_entry": True if gnos_analysis.get('analysis_id') in annotations.get('may2016').get('gnos_id') else False,
"is_s3_transfer_scheduled": True if gnos_analysis.get('analysis_id') in annotations.get('s3_transfer_scheduled') else False,
"is_s3_transfer_completed": True if gnos_analysis.get('analysis_id') in annotations.get('s3_transfer_completed') else False,
"exists_xml_md5sum_mismatch": False,
"variant_calling_performed_at": gnos_analysis.get('analysis_xml').get('ANALYSIS_SET').get('ANALYSIS').get('@center_name'),
"workflow_details": {
"variant_workflow_name": analysis_attrib.get('variant_workflow_name'),
"variant_workflow_version": analysis_attrib.get('variant_workflow_version'),
"variant_pipeline_input_info": json.loads( analysis_attrib.get('variant_pipeline_input_info') ).get('workflow_inputs') if analysis_attrib.get('variant_pipeline_input_info') else [],
"variant_pipeline_output_info": json.loads( analysis_attrib.get('variant_pipeline_output_info') ).get('workflow_outputs') if analysis_attrib.get('variant_pipeline_output_info') else [],
"variant_qc_metrics": {},
"variant_timing_metrics": {}
}
}
qc = {}
try:
qc = json.loads( analysis_attrib.get('variant_qc_metrics') ).get('qc_metrics')
    except Exception:
logger.warning('variant_qc_metrics format incorrect: {}'.format(analysis_attrib.get('variant_qc_metrics')))
if isinstance(qc, dict): vcf_entry.get('workflow_details')['variant_qc_metrics'] = qc
# DO NOT KEEP timing metrics, it's way too verbose
#timing = json.loads( analysis_attrib.get('variant_timing_metrics') ).get('timing_metrics') if analysis_attrib.get('variant_timing_metrics') else {}
#if isinstance(timing, dict): vcf_entry.get('workflow_details')['variant_timing_metrics'] = timing
#print json.dumps(vcf_entry) # debugging only
workflow_name = vcf_entry.get('workflow_details').get('variant_workflow_name')
workflow_version = vcf_entry.get('workflow_details').get('variant_workflow_version')
if workflow_name == 'SangerPancancerCgpCnIndelSnvStr+SVFIX2' and (( workflow_version.startswith('1.0.') or workflow_version.startswith('1.1.'))
and not workflow_version in ['1.0.0', '1.0.1']):
vcf_entry['vcf_workflow_type'] = 'sanger'
vcf_entry['vcf_workflow_result_version'] = 'v3'
elif workflow_name == 'SangerPancancerCgpCnIndelSnvStr+SVFIX' and (( workflow_version.startswith('1.0.') or workflow_version.startswith('1.1.'))
and not workflow_version in ['1.0.0', '1.0.1']):
vcf_entry['vcf_workflow_type'] = 'sanger'
vcf_entry['vcf_workflow_result_version'] = 'v2'
elif workflow_name == 'SangerPancancerCgpCnIndelSnvStr' and (( workflow_version.startswith('1.0.') or workflow_version.startswith('1.1.'))
and not workflow_version in ['1.0.0', '1.0.1']):
vcf_entry['vcf_workflow_type'] = 'sanger'
vcf_entry['vcf_workflow_result_version'] = 'v1'
elif workflow_name.startswith('EMBLPancancer') and LooseVersion(workflow_version) >= LooseVersion('1.0.0'):
vcf_entry['vcf_workflow_type'] = 'embl'
vcf_entry['vcf_workflow_result_version'] = 'v1'
elif workflow_name == 'DKFZPancancerCnIndelSnv' and LooseVersion(workflow_version) >= LooseVersion('1.0.0'):
vcf_entry['vcf_workflow_type'] = 'dkfz'
vcf_entry['vcf_workflow_result_version'] = 'v1'
elif workflow_name == 'EMBLDKFZPancancerStrCnIndelSNV' and LooseVersion(workflow_version) >= LooseVersion('1.0.5'):
vcf_entry['vcf_workflow_type'] = 'dkfz_embl'
vcf_entry['vcf_workflow_result_version'] = 'v1'
elif workflow_name == 'DKFZ_EMBL_Combined_HPC':
vcf_entry['vcf_workflow_type'] = 'dkfz_embl'
vcf_entry['vcf_workflow_result_version'] = 'v1'
elif workflow_name == 'DKFZ_EMBL_Merged':
vcf_entry['vcf_workflow_type'] = 'dkfz_embl'
vcf_entry['vcf_workflow_result_version'] = 'v1'
elif workflow_name == 'BROAD_MUSE_PIPELINE' or workflow_name == 'BROAD_MUSE_PIPELINE_SEVEN_BRIDGES':
vcf_entry.get('workflow_details')['workflow_file_subset'] = analysis_attrib.get('workflow_file_subset')
vcf_entry.get('workflow_details')['related_file_subset_uuids'] = analysis_attrib.get('related_file_subset_uuids').split(',')
if vcf_entry.get('workflow_details').get('workflow_file_subset') == 'broad':
vcf_entry['vcf_workflow_type'] = 'broad'
vcf_entry['vcf_workflow_result_version'] = 'v1'
elif vcf_entry.get('workflow_details').get('workflow_file_subset') == 'broad-v2':
vcf_entry['vcf_workflow_type'] = 'broad'
vcf_entry['vcf_workflow_result_version'] = 'v2'
elif vcf_entry.get('workflow_details').get('workflow_file_subset') == 'broad-v3':
vcf_entry['vcf_workflow_type'] = 'broad'
vcf_entry['vcf_workflow_result_version'] = 'v3'
elif vcf_entry.get('workflow_details').get('workflow_file_subset') == 'muse':
vcf_entry['vcf_workflow_type'] = 'muse'
vcf_entry['vcf_workflow_result_version'] = 'v1'
elif vcf_entry.get('workflow_details').get('workflow_file_subset') == 'broad_tar':
vcf_entry['vcf_workflow_type'] = 'broad_tar'
vcf_entry['vcf_workflow_result_version'] = 'v1'
else:
vcf_entry['vcf_workflow_type'] = 'Unknown_broad'
vcf_entry['vcf_workflow_result_version'] = 'v1' # we always need this key, this line is added by Junjun on Feb 24, 2016
logger.warning('broad variant calling entry which has unknown file type {}, donor: {} GNOS entry: {}'
.format(vcf_entry.get('workflow_details')['workflow_file_subset'], donor_unique_id, gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
elif workflow_name == 'OxoGWorkflow-OxoGFiltering':
vcf_entry['vcf_workflow_type'] = 'oxog'
vcf_entry['vcf_workflow_result_version'] = 'v1'
elif workflow_name == 'OxoGWorkflow-variantbam':
vcf_entry['vcf_workflow_type'] = 'minibam'
vcf_entry['vcf_workflow_result_version'] = 'v1'
elif workflow_name == 'consensus_snv_mnv':
vcf_entry['vcf_workflow_type'] = 'snv_mnv'
vcf_entry['vcf_workflow_result_version'] = 'v1'
elif workflow_name == 'consensus_indel':
vcf_entry['vcf_workflow_type'] = 'indel'
vcf_entry['vcf_workflow_result_version'] = 'v1'
elif workflow_name == 'consensus_cnv':
vcf_entry['vcf_workflow_type'] = 'cnv'
vcf_entry['vcf_workflow_result_version'] = 'v1'
elif workflow_name == 'consensus_sv':
vcf_entry['vcf_workflow_type'] = 'sv'
vcf_entry['vcf_workflow_result_version'] = 'v1'
else:
vcf_entry['vcf_workflow_type'] = 'Unknown'
        vcf_entry['vcf_workflow_result_version'] = 'v1' # we always need this key, this line is added by Junjun on Feb 24, 2016 to avoid a code crash when an unknown variant call entry shows up
        logger.warning('the entry is a variant calling entry but is likely a test entry, donor: {} GNOS entry: {}'
.format(donor_unique_id, gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return vcf_entry
def set_default(obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
if isinstance(obj, set):
return list(obj)
raise TypeError
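# Minimal usage sketch (illustrative, not called by the script): set_default lets json.dumps
# serialize the datetime and set values that appear in donor/bam_file documents, e.g.
#   json.dumps({'upload_date': datetime.datetime(2016, 5, 1), 'aliquots': set(['a1'])},
#              default=set_default)
# would yield (key order aside) '{"upload_date": "2016-05-01T00:00:00", "aliquots": ["a1"]}'.
# Any other unserializable type still raises TypeError, as json.dumps expects.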
def prepare_aggregated_specimen_level_info(bam_file):
specimen = copy.deepcopy(bam_file)
# TODO: actual aggregation to be completed
return specimen
def is_in_donor_blacklist(donor_unique_id):
donor_blacklist = set([
"PACA-CA::PCSI_0449",
"PACA-CA::PCSI_0309",
"LIHC-US::G1551",
"LIHC-US::G15512",
"TCGA_MUT_BENCHMARK_4::G15511",
"TCGA_MUT_BENCHMARK_4::G15512",
"PBCA-DE::SNV_CALLING_TEST"
])
if donor_blacklist.intersection([donor_unique_id]):
return True
else:
return False
def create_bam_file_entry(donor_unique_id, analysis_attrib, gnos_analysis, annotations):
file_info = parse_bam_file_info(gnos_analysis.get('files').get('file'))
bam_file = {
"dcc_specimen_type": analysis_attrib.get('dcc_specimen_type'),
"submitter_specimen_id": analysis_attrib.get('submitter_specimen_id'),
"submitter_sample_id": analysis_attrib.get('submitter_sample_id'),
"icgc_specimen_id": get_icgc_id(donor_unique_id, analysis_attrib['dcc_project_code'], analysis_attrib['submitter_specimen_id'], 'specimen', annotations),
"icgc_sample_id": get_icgc_id(donor_unique_id, analysis_attrib['dcc_project_code'], analysis_attrib['submitter_sample_id'], 'sample', annotations),
"aliquot_id": gnos_analysis.get('aliquot_id'),
"use_cntl": analysis_attrib.get('use_cntl'),
"total_lanes": analysis_attrib.get('total_lanes'),
"oxog_score": annotations.get('oxog_score').get(gnos_analysis.get('aliquot_id')) if annotations.get('oxog_score').get(gnos_analysis.get('aliquot_id')) else None,
"ContEST": annotations.get('ContEST').get(gnos_analysis.get('aliquot_id')) if annotations.get('ContEST').get(gnos_analysis.get('aliquot_id')) else None,
"Stars": annotations.get('Stars').get(gnos_analysis.get('aliquot_id')) if annotations.get('Stars').get(gnos_analysis.get('aliquot_id')) else None,
"effective_xml_md5sum": gnos_analysis.get('_effective_xml_md5sum'),
"is_santa_cruz_entry": True if gnos_analysis.get('analysis_id') in annotations.get('santa_cruz').get('gnos_id') else False,
"is_aug2015_entry": True if gnos_analysis.get('analysis_id') in annotations.get('aug2015').get('gnos_id') else False,
"is_oct2015_entry": True if gnos_analysis.get('analysis_id') in annotations.get('oct2015').get('gnos_id') else False,
"is_mar2016_entry": True if gnos_analysis.get('analysis_id') in annotations.get('mar2016').get('gnos_id') else False,
"is_may2016_entry": True if gnos_analysis.get('analysis_id') in annotations.get('may2016').get('gnos_id') else False,
"is_s3_transfer_scheduled": True if gnos_analysis.get('analysis_id') in annotations.get('s3_transfer_scheduled') else False,
"is_s3_transfer_completed": True if gnos_analysis.get('analysis_id') in annotations.get('s3_transfer_completed') else False,
"library_strategy": gnos_analysis.get('library_strategy'),
"gnos_repo": gnos_analysis.get('analysis_detail_uri').split('/cghub/')[0] + '/',
"gnos_metadata_url": gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull'),
"refassem_short_name": gnos_analysis.get('refassem_short_name'),
"bam_gnos_ao_id": gnos_analysis.get('analysis_id'),
"upload_date": dateutil.parser.parse(gnos_analysis.get('upload_date')),
"published_date": dateutil.parser.parse(gnos_analysis.get('published_date')),
"last_modified": dateutil.parser.parse(gnos_analysis.get('last_modified')),
"bam_file_name": file_info.get('file_name'),
"bam_file_size": file_info.get('file_size'),
"md5sum": file_info.get('md5sum'),
"bai_file_name": file_info.get('bai_file_name'),
"bai_file_size": file_info.get('bai_file_size'),
"bai_file_md5sum": file_info.get('bai_file_md5sum'),
}
# much more TODO for bam file info and alignment details
if bam_file.get('refassem_short_name') == 'unaligned' and \
gnos_analysis.get('library_strategy') == 'WGS' :
bam_file['is_aligned'] = False
bam_file['bam_type'] = 'Unaligned BAM'
bam_file['alignment'] = None # or initiate as empty object {}, depending on how ES searches it
elif (analysis_attrib.get('workflow_output_bam_contents') == 'unaligned'
or gnos_analysis['analysis_xml']['ANALYSIS_SET']['ANALYSIS']['DESCRIPTION'].startswith('The BAM file includes unmapped reads extracted from specimen-level BAM with the reference alignment')
) and gnos_analysis.get('library_strategy') == 'WGS' : # this is actually BAM with unmapped reads
bam_file['is_aligned'] = False
bam_file['bam_type'] = 'Specimen level unmapped reads after BWA alignment'
bam_file['alignment'] = None
elif gnos_analysis['analysis_xml']['ANALYSIS_SET']['ANALYSIS']['DESCRIPTION'].startswith('Specimen-level BAM from the reference alignment') \
and gnos_analysis.get('library_strategy') == 'WGS' :
bam_file['is_aligned'] = True
bam_file['bam_type'] = 'Specimen level aligned BAM'
bam_file['alignment'] = get_alignment_detail(analysis_attrib, gnos_analysis)
elif (gnos_analysis['analysis_xml']['ANALYSIS_SET']['ANALYSIS']['DESCRIPTION'].lower().startswith('star ') \
or gnos_analysis['analysis_xml']['ANALYSIS_SET']['ANALYSIS']['DESCRIPTION'].lower().startswith('tophat2 ')) \
and gnos_analysis.get('library_strategy') == 'RNA-Seq' :
bam_file['is_aligned'] = True
bam_file['bam_type'] = 'RNA-Seq aligned BAM'
bam_file['alignment'] = get_rna_seq_alignment_detail(analysis_attrib, gnos_analysis)
elif (bam_file.get('refassem_short_name') == 'unaligned' and gnos_analysis.get('library_strategy') == 'RNA-Seq'):
bam_file['is_aligned'] = False
bam_file['bam_type'] = 'RNA-Seq unaligned BAM'
bam_file['alignment'] = None
else:
bam_file['is_aligned'] = False
bam_file['bam_type'] = 'Unknown'
bam_file['alignment'] = None
return bam_file
def get_rna_seq_alignment_detail(analysis_attrib, gnos_analysis):
alignment = {
"workflow_name": analysis_attrib.get('workflow_name'),
"workflow_version": analysis_attrib.get('workflow_version'),
"workflow_bundle_url": analysis_attrib.get('workflow_bundle_url'),
"workflow_source_url": analysis_attrib.get('workflow_source_url')
}
return alignment
def get_alignment_detail(analysis_attrib, gnos_analysis):
alignment = {
"data_train": "Train 2",
"workflow_name": analysis_attrib.get('workflow_name'),
"workflow_version": analysis_attrib.get('workflow_version'),
"workflow_bundle_url": analysis_attrib.get('workflow_bundle_url'),
"workflow_source_url": analysis_attrib.get('workflow_source_url'),
"pipeline_input_info": json.loads( analysis_attrib.get('pipeline_input_info') ).get('pipeline_input_info') if analysis_attrib.get('pipeline_input_info') else [],
"qc_metrics": json.loads( analysis_attrib.get('qc_metrics').replace('"not_collected"', 'null') ).get('qc_metrics') if analysis_attrib.get('qc_metrics') else [],
"markduplicates_metrics": json.loads( analysis_attrib.get('markduplicates_metrics') ).get('markduplicates_metrics') if analysis_attrib.get('markduplicates_metrics') else [],
"timing_metrics": json.loads( analysis_attrib.get('timing_metrics').replace('"not_collected"', 'null') ).get('timing_metrics') if analysis_attrib.get('timing_metrics') else [],
}
alignment['input_bam_summary'] = {} # TODO: do this in a function
return alignment
def parse_bam_file_info(file_fragment):
file_info = {}
    if not isinstance(file_fragment, list): file_fragment = [file_fragment]
for f in file_fragment:
f = dict(f)
if f.get('filename').endswith('.bam'): # assume there is only one BAM file
file_info['file_name'] = f.get('filename')
file_info['file_size'] = int(f.get('filesize'))
file_info['md5sum'] = f.get('checksum').get('#text')
elif f.get('filename').endswith('.bai'): # assume there is only one BAI file
file_info['bai_file_name'] = f.get('filename')
file_info['bai_file_size'] = int(f.get('filesize'))
file_info['bai_file_md5sum'] = f.get('checksum').get('#text')
return file_info
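# Illustrative input/output sketch for parse_bam_file_info (file names and checksums are
# made up). Given the GNOS 'files' fragment, either a single dict or a list of dicts:
#   [{'filename': 'sample.bam', 'filesize': '1024', 'checksum': {'#text': 'abc123'}},
#    {'filename': 'sample.bam.bai', 'filesize': '64', 'checksum': {'#text': 'def456'}}]
# it would return:
#   {'file_name': 'sample.bam', 'file_size': 1024, 'md5sum': 'abc123',
#    'bai_file_name': 'sample.bam.bai', 'bai_file_size': 64, 'bai_file_md5sum': 'def456'}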
def is_train_2_aligned(analysis_attrib, gnos_analysis):
if ( gnos_analysis.get('refassem_short_name') == 'GRCh37'
and analysis_attrib.get('workflow_version')
and analysis_attrib.get('workflow_version').startswith('2.6.')
):
return True
else:
return False
def is_corrupted_train_2_alignment(analysis_attrib, gnos_analysis):
if ( is_train_2_aligned(analysis_attrib, gnos_analysis)
and not gnos_analysis['analysis_xml']['ANALYSIS_SET']['ANALYSIS']['DESCRIPTION'].startswith('The BAM file includes unmapped reads extracted from specimen-level BAM with the reference alignment')
and (not analysis_attrib.get('qc_metrics') or not analysis_attrib.get('markduplicates_metrics'))
):
return True
else:
return False
def detect_and_low_case_uuid(submitter_id):
    uuid_pattern = re.compile(r'^[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}\Z', re.I)
uuid = submitter_id.lower() if uuid_pattern.search(submitter_id) else submitter_id
return uuid
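# Illustrative behaviour (hypothetical IDs): a version-4 UUID-looking submitter id is
# lower-cased, anything else is returned unchanged:
#   detect_and_low_case_uuid('0EA9AF0E-BF72-4F99-B7FB-7B8AA4C1F8D6')
#       -> '0ea9af0e-bf72-4f99-b7fb-7b8aa4c1f8d6'
#   detect_and_low_case_uuid('PD1234a') -> 'PD1234a'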
def get_icgc_id(donor_unique_id, dcc_project_code, submitter_id, subtype, annotations):
submitter_id = detect_and_low_case_uuid(submitter_id)
if dcc_project_code.endswith('-US'):
if not annotations.get('uuid_to_barcode').get(submitter_id):
logger.warning('donor: {}, the {} with uuid: {} has no mapping barcode'.format(donor_unique_id, subtype, submitter_id))
return None
submitter_id = annotations.get('uuid_to_barcode').get(submitter_id)
if not annotations.get('icgc_'+subtype+'_id').get(dcc_project_code+'::'+submitter_id):
logger.warning('donor: {}, the {} with pcawg_id: {} has no mapping icgc_id'.format(donor_unique_id, subtype, submitter_id))
return None
icgc_id = annotations.get('icgc_'+subtype+'_id').get(dcc_project_code+'::'+submitter_id)
return icgc_id
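# Lookup sketch for get_icgc_id (values are hypothetical): for a '-US' project the submitter id
# (a TCGA uuid) is first mapped to a barcode via the 'uuid_to_barcode' annotation, then
# '<dcc_project_code>::<barcode>' is looked up in the 'icgc_<subtype>_id' annotation; non-US
# projects skip the barcode step. Missing mappings are logged and yield None rather than raising.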
def create_donor(donor_unique_id, analysis_attrib, gnos_analysis, annotations):
donor = {
'donor_unique_id': donor_unique_id,
'submitter_donor_id': analysis_attrib['submitter_donor_id'],
'dcc_project_code': analysis_attrib['dcc_project_code'],
'icgc_donor_id': get_icgc_id(donor_unique_id, analysis_attrib['dcc_project_code'], analysis_attrib['submitter_donor_id'], 'donor', annotations),
'gnos_study': gnos_analysis.get('study'),
'gnos_repo': gnos_analysis.get('analysis_detail_uri').split('/cghub/')[0] + '/', # can be better
'flags': {
'is_test': is_test(analysis_attrib, gnos_analysis),
'is_cell_line': is_cell_line(analysis_attrib, gnos_analysis),
'is_train2_donor': False,
'is_train2_pilot': False,
'is_santa_cruz_donor': True if donor_unique_id in annotations.get('santa_cruz').get('donor') else False,
'is_aug2015_donor': True if donor_unique_id in annotations.get('aug2015').get('donor') else False,
'is_oct2015_donor': True if donor_unique_id in annotations.get('oct2015').get('donor') else False,
'is_mar2016_donor': True if donor_unique_id in annotations.get('mar2016').get('donor') else False,
'is_may2016_donor': True if donor_unique_id in annotations.get('may2016').get('donor') else False,
'TiN': annotations.get('TiN').get(donor_unique_id, 'NA'),
'is_normal_specimen_aligned': False,
'are_all_tumor_specimens_aligned': False,
'has_aligned_tumor_specimen': False,
'aligned_tumor_specimen_aliquot_counts': 0,
'all_tumor_specimen_aliquot_counts': 0,
'is_sanger_variant_calling_performed': False,
'is_dkfz_variant_calling_performed': False,
'is_embl_variant_calling_performed': False,
'is_dkfz_embl_variant_calling_performed': False,
'is_oxog_variant_calling_performed': False,
'is_minibam_variant_calling_performed': False,
'is_broad_variant_calling_performed': False,
'broad':{
'broad_file_subset_exist': False,
'broad_tar_file_subset_exist': False,
'muse_file_subset_exist': False,
'exist_file_subsets_mismatch': False
},
'variant_calling_performed': [],
'vcf_in_jamboree': [],
'is_normal_star_rna_seq_alignment_performed': False,
'is_normal_tophat_rna_seq_alignment_performed': False,
'is_tumor_star_rna_seq_alignment_performed': False,
'is_tumor_tophat_rna_seq_alignment_performed': False,
'exists_vcf_file_prefix_mismatch': False,
'is_bam_used_by_variant_calling_missing': False,
'qc_score': None,
'exists_xml_md5sum_mismatch': False
},
'normal_specimen': {},
'aligned_tumor_specimens': [],
'aligned_tumor_specimen_aliquots': set(),
'all_tumor_specimen_aliquots': set(),
'bam_files': [],
'rna_seq': {
'alignment': {
'normal': {},
'tumor': []
}
}
}
    try:
        experiment = gnos_analysis.get('experiment_xml').get('EXPERIMENT_SET').get('EXPERIMENT')
        if isinstance(experiment, list):
            donor['sequencing_center'] = experiment[0].get('@center_name')
        else:
            donor['sequencing_center'] = experiment.get('@center_name')
    except Exception:
        logger.warning('analysis object has no sequencing_center information: {}'.format(gnos_analysis.get('analysis_detail_uri')))
if not annotations.get('qc_donor_prioritization'):
logger.warning('Missing qc_donor_prioritization annotation')
elif annotations.get('qc_donor_prioritization').get(donor_unique_id) is not None:
donor.get('flags')['qc_score'] = annotations.get('qc_donor_prioritization').get(donor_unique_id)
else:
logger.warning('No qc prioritization score for donor: {}'.format(donor_unique_id))
return donor
def is_test(analysis_attrib, gnos_analysis):
if (gnos_analysis.get('aliquot_id') == '85098796-a2c1-11e3-a743-6c6c38d06053'
or gnos_analysis.get('study') == 'CGTEST'
or gnos_analysis.get('study') == 'icgc_pancancer_vcf_test'
or gnos_analysis.get('study').lower().endswith('_test')
):
return True
elif (analysis_attrib.get('dcc_project_code') == 'None-US'
and analysis_attrib.get('submitter_donor_id') == 'None'
and analysis_attrib.get('submitter_specimen_id') == 'None'
and analysis_attrib.get('dcc_specimen_type') == 'unknown'
):
return True
# TODO: what's the criteria for determining *test* entries
return False
def is_cell_line(analysis_attrib, gnos_analysis):
is_cell_line = False
if analysis_attrib.get('dcc_project_code') == 'TCGA_MUT_BENCHMARK_4':
is_cell_line = True
return is_cell_line
def get_analysis_attrib(gnos_analysis):
analysis_attrib = {}
if (not gnos_analysis['analysis_xml']['ANALYSIS_SET'].get('ANALYSIS')
or not gnos_analysis['analysis_xml']['ANALYSIS_SET']['ANALYSIS'].get('ANALYSIS_ATTRIBUTES')
or not gnos_analysis['analysis_xml']['ANALYSIS_SET']['ANALYSIS']['ANALYSIS_ATTRIBUTES'].get('ANALYSIS_ATTRIBUTE')
):
return None
analysis_attrib_fragment = gnos_analysis['analysis_xml']['ANALYSIS_SET']['ANALYSIS']['ANALYSIS_ATTRIBUTES']['ANALYSIS_ATTRIBUTE']
    if not isinstance(analysis_attrib_fragment, list): analysis_attrib_fragment = [analysis_attrib_fragment]
for a in analysis_attrib_fragment:
if not analysis_attrib.get(a['TAG']):
analysis_attrib[a['TAG']] = a['VALUE']
else:
logger.warning('duplicated analysis attribute key: {}'.format(a['TAG']))
return analysis_attrib
def get_gnos_analysis(f):
with open (f, 'r') as x: xml_str = x.read()
gnos_analysis = xmltodict.parse(xml_str).get('ResultSet').get('Result')
add_effective_xml_md5sum(gnos_analysis, xml_str)
return gnos_analysis
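# Usage sketch (the path is hypothetical): get_gnos_analysis reads one harvested GNOS
# metadata XML dump, returns the ResultSet/Result element as a dict via xmltodict, and
# stamps it with '_effective_xml_md5sum' (see add_effective_xml_md5sum below), e.g.
#   ga = get_gnos_analysis('osdc-icgc/0ea9af0e-bf72-4f99-b7fb-7b8aa4c1f8d6__live__2016-05-01.xml')
#   ga.get('analysis_id'), ga.get('_effective_xml_md5sum')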
def add_effective_xml_md5sum(gnos_analysis, xml_str):
xml_str = re.sub(r'<ResultSet .+?>', '<ResultSet>', xml_str)
xml_str = re.sub(r'<last_modified>.+?</last_modified>', '<last_modified></last_modified>', xml_str)
xml_str = re.sub(r'<upload_date>.+?</upload_date>', '<upload_date></upload_date>', xml_str)
xml_str = re.sub(r'<published_date>.+?</published_date>', '<published_date></published_date>', xml_str)
xml_str = re.sub(r'<center_name>.+?</center_name>', '<center_name></center_name>', xml_str)
xml_str = re.sub(r'<analyte_code>.+?</analyte_code>', '<analyte_code></analyte_code>', xml_str)
xml_str = re.sub(r'<reason>.+?</reason>', '<reason></reason>', xml_str)
xml_str = re.sub(r'<study>.+?</study>', '<study></study>', xml_str)
xml_str = re.sub(r'<sample_accession>.+?</sample_accession>', '<sample_accession></sample_accession>', xml_str)
#xml_str = re.sub(r'<dcc_project_code>.+?</dcc_project_code>', '<dcc_project_code></dcc_project_code>', xml_str)
#xml_str = re.sub(r'<participant_id>.+?</participant_id>', '<participant_id></participant_id>', xml_str)
xml_str = re.sub(r'<dcc_specimen_type>.+?</dcc_specimen_type>', '<dcc_specimen_type></dcc_specimen_type>', xml_str)
xml_str = re.sub(r'<specimen_id>.+?</specimen_id>', '<specimen_id></specimen_id>', xml_str)
xml_str = re.sub(r'<sample_id>.+?</sample_id>', '<sample_id></sample_id>', xml_str)
xml_str = re.sub(r'<use_cntl>.+?</use_cntl>', '<use_cntl></use_cntl>', xml_str)
xml_str = re.sub(r'<library_strategy>.+?</library_strategy>', '<library_strategy></library_strategy>', xml_str)
xml_str = re.sub(r'<platform>.+?</platform>', '<platform></platform>', xml_str)
xml_str = re.sub(r'<refassem_short_name>.+?</refassem_short_name>', '<refassem_short_name></refassem_short_name>', xml_str)
xml_str = re.sub(r'<STUDY_REF .+?/>', '<STUDY_REF/>', xml_str)
xml_str = re.sub(r'</STUDY_REF>', '', xml_str)
xml_str = re.sub(r'<STUDY_REF .+?>', '<STUDY_REF/>', xml_str)
xml_str = re.sub(r'<ANALYSIS_SET .+?>', '<ANALYSIS_SET>', xml_str)
xml_str = re.sub(r'<ANALYSIS .+?>', '<ANALYSIS>', xml_str)
xml_str = re.sub(r'<EXPERIMENT_SET .+?>', '<EXPERIMENT_SET>', xml_str)
xml_str = re.sub(r'<RUN_SET .+?>', '<RUN_SET>', xml_str)
xml_str = re.sub(r'<analysis_detail_uri>.+?</analysis_detail_uri>', '<analysis_detail_uri></analysis_detail_uri>', xml_str)
xml_str = re.sub(r'<analysis_submission_uri>.+?</analysis_submission_uri>', '<analysis_submission_uri></analysis_submission_uri>', xml_str)
xml_str = re.sub(r'<analysis_data_uri>.+?</analysis_data_uri>', '<analysis_data_uri></analysis_data_uri>', xml_str)
# we need to take care of xml properties in different order but effectively/semantically the same
effective_eq_xml = json.dumps(xmltodict.parse(xml_str).get('ResultSet').get('Result'), indent=4, sort_keys=True)
# print effective_eq_xml
# sys.exit()
gnos_analysis.update({'_effective_xml_md5sum': hashlib.md5(effective_eq_xml).hexdigest()})
return gnos_analysis
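# Note (editorial): the fields blanked above are the volatile / repo-specific ones, so the
# same analysis object synchronized to different GNOS repos should hash to the same
# '_effective_xml_md5sum'; downstream code compares these hashes to set the
# 'exists_xml_md5sum_mismatch' flag on donors and VCF entries.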
def get_xml_files( metadata_dir, conf, repo ):
xml_files = []
#ao_seen = {}
for r in conf.get('gnos_repos'):
if repo and not r.get('repo_code') == repo:
continue
gnos_ao_list_file = metadata_dir + '/analysis_objects.' + r.get('repo_code') + '.tsv'
if not os.path.isfile(gnos_ao_list_file):
            logger.warning('gnos analysis object list file does not exist: {}'.format(gnos_ao_list_file))
continue
        with open(gnos_ao_list_file, 'r') as ao_list_fh:
            for ao in ao_list_fh:
                ao_uuid, ao_state = str.split(ao, '\t')[0:2]
                if ao_state != 'live': continue # skip ao that is not live
#if (ao_seen.get(ao_uuid)): continue # skip ao if already added
#ao_seen[ao_uuid] = 1 # include this one
xml_files.append(r.get('repo_code') + '/' + ao.replace('\t', '__').replace('\n','') + '.xml')
return xml_files
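# Input sketch for get_xml_files (repo code and uuid are made up): each line of
# analysis_objects.<repo_code>.tsv is expected to start with '<ao_uuid>\t<state>', e.g.
#   0ea9af0e-bf72-4f99-b7fb-7b8aa4c1f8d6<TAB>live<TAB>2016-05-01
# Only 'live' entries are kept, and each line becomes a relative XML path such as
#   '<repo_code>/0ea9af0e-bf72-4f99-b7fb-7b8aa4c1f8d6__live__2016-05-01.xml'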
def process(metadata_dir, conf, es_index, es, donor_output_jsonl_file, bam_output_jsonl_file, repo, exclude_gnos_id_lists):
donors = {}
vcf_entries = {}
consensus_entries = {}
# update the pc_annotation-sanger_vcf_in_jamboree files using the jamboree subdirectory files
vcf_in_jamboree_dir = '../pcawg-operations/variant_calling/sanger_workflow/jamboree/'
infiles = glob.glob(vcf_in_jamboree_dir+'/Sanger_jamboree_batch*.txt')
outfile = 'pc_annotation-sanger_vcf_in_jamboree.tsv' # hard-code file name
update_vcf_jamboree(infiles, outfile)
annotations = {}
read_annotations(annotations, 'gnos_assignment', 'pc_annotation-gnos_assignment.yml') # hard-code file name for now
read_annotations(annotations, 'train2_pilot', 'pc_annotation-train2_pilot.tsv') # hard-code file name for now
read_annotations(annotations, 'donor_blacklist', '../pcawg-operations/lists/blacklist/pc_annotation-donor_blacklist.tsv') # hard-code file name for now
read_annotations(annotations, 'manual_qc_failed', 'pc_annotation-manual_qc_failed.tsv') # hard-code file name for now
read_annotations(annotations, 'sanger_vcf_in_jamboree', 'pc_annotation-sanger_vcf_in_jamboree.tsv') # hard-code file name for now
read_annotations(annotations, 'santa_cruz', '../pcawg-operations/data_releases/santa_cruz/santa_cruz_freeze_entry.tsv')
read_annotations(annotations, 's3_transfer_scheduled', '../s3-transfer-operations/s3-transfer-jobs*/*/*.json')
read_annotations(annotations, 's3_transfer_completed', '../s3-transfer-operations/s3-transfer-jobs*/completed-jobs/*.json')
read_annotations(annotations, 'qc_donor_prioritization', 'qc_donor_prioritization.txt')
read_annotations(annotations, 'uuid_to_barcode', 'pc_annotation-tcga_uuid2barcode.tsv')
read_annotations(annotations, 'icgc_donor_id', '../pcawg-operations/lists/icgc_bioentity_ids/pc_annotation-icgc_donor_ids.csv')
read_annotations(annotations, 'icgc_specimen_id', '../pcawg-operations/lists/icgc_bioentity_ids/pc_annotation-icgc_specimen_ids.csv')
read_annotations(annotations, 'icgc_sample_id', '../pcawg-operations/lists/icgc_bioentity_ids/pc_annotation-icgc_sample_ids.csv')
read_annotations(annotations, 'pcawg_final_list', '../pcawg-operations/lists/pc_annotation-pcawg_final_list.tsv')
read_annotations(annotations, 'aliquot_blacklist', '../pcawg-operations/lists/blacklist/pc_annotation-aliquot_blacklist.tsv')
read_annotations(annotations, 'oxog_score', '../pcawg-operations/lists/quality_control_info/broad_qc_metrics.tsv')
read_annotations(annotations, 'ContEST', '../pcawg-operations/lists/quality_control_info/broad_qc_metrics.tsv')
read_annotations(annotations, 'Stars', '../pcawg-operations/lists/quality_control_info/PAWG_QC_Summary_of_Measures.tsv')
read_annotations(annotations, 'TiN', '../pcawg-operations/lists/quality_control_info/TiN_donor.TiNsorted.tsv')
for r in ['aug2015', 'oct2015', 'mar2016', 'may2016']:
read_annotations(annotations, r, '../pcawg-operations/data_releases/'+r+'/release_'+r+'_entry.tsv')
# hard-code the file name for now
train2_freeze_bams = read_train2_bams('../pcawg-operations/variant_calling/train2-lists/Data_Freeze_Train_2.0_GoogleDocs__2015_04_10_1150.tsv')
# pre-exclude gnos entries when this option is chosen
gnos_ids_to_be_excluded = set()
if exclude_gnos_id_lists:
files = glob.glob(exclude_gnos_id_lists)
for fname in files:
with open(fname) as f:
for d in f: gnos_ids_to_be_excluded.add(d.rstrip())
donor_fh = open(donor_output_jsonl_file, 'w')
bam_fh = open(bam_output_jsonl_file, 'w')
for f in get_xml_files( metadata_dir, conf, repo ):
f = conf.get('output_dir') + '/__all_metadata_xml/' + f
gnos_analysis = get_gnos_analysis(f)
#print (json.dumps(gnos_analysis)) # debug
if gnos_analysis:
logger.info( 'processing xml file: {} ...'.format(f) )
if gnos_analysis.get('analysis_id') and gnos_analysis.get('analysis_id') in gnos_ids_to_be_excluded:
logger.warning( 'skipping xml file: {} with analysis_id: {}, as it\'s in the list to be excluded' \
.format(f, gnos_analysis.get('analysis_id')) )
continue
process_gnos_analysis( gnos_analysis, donors, vcf_entries, es_index, es, bam_fh, annotations, consensus_entries)
else:
logger.warning( 'skipping invalid xml file: {}'.format(f) )
for donor_id in donors.keys():
donor = donors[donor_id]
process_donor(donor, annotations, vcf_entries, conf, train2_freeze_bams, consensus_entries)
# push to Elasticsearch
es.index(index=es_index, doc_type='donor', id=donor['donor_unique_id'], \
body=json.loads(json.dumps(donor, default=set_default)) )
del donor['bam_files'] # prune this before dumping JSON for Keiran
donor_fh.write(json.dumps(donor, default=set_default) + '\n')
donor_fh.close()
bam_fh.close()
def update_vcf_jamboree(infilenames, outfilename):
seen = set() # just for checking in case there are duplicated lines in jamboree files
with open(outfilename, 'w') as fout:
for f_index in infilenames:
with open(f_index,'r') as fin:
for line in fin:
if len(line.rstrip()) == 0: continue
                    if line in seen:
                        continue
                    donor_unique_id, gnos_metadata_url, aliquot_id = str.split(line.rstrip(), '\t')
                    repo, gnos_id = str.split(gnos_metadata_url, 'cghub/metadata/analysisFull/')
                    fout.write(donor_unique_id + '\t' + gnos_id + '\n')
                    seen.add(line)
def read_train2_bams(filename):
train2_bams = {}
with open(filename, 'r') as r:
for line in r:
if line.startswith('dcc_project_code'): continue
if len(line.rstrip()) == 0: continue
dcc_project_code, donor_submitter_id, normal_aliquot_id, normal_aligned_bam_gnos_url,\
num_tumor_samples, tumor_aliquot_id, tumor_aligned_bam_gnos_urls = str.split(line.rstrip(), '\t')
normal_repo, normal_gnos_id = str.split(normal_aligned_bam_gnos_url, 'cghub/metadata/analysisFull/')
train2_bams[dcc_project_code + "::" + donor_submitter_id] = {}
train2_bams.get(dcc_project_code + "::" + donor_submitter_id)[normal_gnos_id] = \
{"repo": normal_repo, "aliquot_id": normal_aliquot_id, "specimen_type": "normal"}
tumor_aliquots = str.split(tumor_aliquot_id, ',')
tumor_urls = str.split(tumor_aligned_bam_gnos_urls, ',')
for tumor_aliquot_id, tumor_url in zip(tumor_aliquots, tumor_urls):
tumor_repo, tumor_gnos_id = str.split(tumor_url, 'cghub/metadata/analysisFull/')
train2_bams.get(dcc_project_code + "::" + donor_submitter_id)[tumor_gnos_id] = \
{"repo": tumor_repo, "aliquot_id": tumor_aliquot_id, "specimen_type": "tumor"}
return train2_bams
def read_annotations(annotations, type, file_name):
if type in ['s3_transfer_scheduled', 's3_transfer_completed']:
annotations[type] = set()
files = glob.glob(file_name)
for f in files:
fname = str.split(f, '/')[-1]
gnos_id = str.split(fname, '.')[0]
annotations[type].add(gnos_id)
else:
if not os.path.isfile(file_name):
return
with open(file_name, 'r') as r:
if annotations.get(type): # reset annotation if exists
del annotations[type]
if type == 'gnos_assignment':
annotations['gnos_assignment'] = {}
assignment = yaml.safe_load(r)
for repo, project_donors in assignment.iteritems():
for p_d in project_donors:
annotations['gnos_assignment'][p_d] = repo # key is project or donor unique id, value is repo
elif type == 'sanger_vcf_in_jamboree':
annotations['sanger_vcf_in_jamboree'] = {}
for line in r:
if line.startswith('#'): continue
if len(line.rstrip()) == 0: continue
donor_id, ao_id = str.split(line.rstrip(), '\t')
annotations[type][donor_id] = ao_id
elif type in ['train2_donors', 'train2_pilot', 'donor_blacklist', 'manual_qc_failed', 'aliquot_blacklist']:
annotations[type] = set()
for line in r:
if line.startswith('#'): continue
if len(line.rstrip()) == 0: continue
annotations[type].add(line.rstrip())
elif type in ['santa_cruz', 'aug2015', 'oct2015', 'mar2016', 'may2016']:
annotations[type] = {
'donor': set(),
'gnos_id': set()
}
for line in r:
if line.startswith('#'): continue
if len(line.rstrip()) == 0: continue
donor_unique_id, gnos_id, entry_type = str.split(line.rstrip(), '\t')
annotations[type]['donor'].add(donor_unique_id)
annotations[type]['gnos_id'].add(gnos_id)
elif type == 'qc_donor_prioritization':
annotations[type] = {}
reader = csv.DictReader(r, delimiter='\t')
for row in reader:
annotations[type][row.get('Unique DonorId')] = int(row.get('Issue Summary'))
elif type == 'uuid_to_barcode':
annotations[type] = {}
for line in r:
if line.startswith('#'): continue
if len(line.rstrip()) == 0: continue
TCGA_project, subtype, uuid, barcode = str.split(line.rstrip(), '\t')
uuid = detect_and_low_case_uuid(uuid)
annotations[type][uuid] = barcode
elif type in ['icgc_donor_id', 'icgc_sample_id', 'icgc_specimen_id']:
annotations[type] = {}
subtype = type.split('_')[1]
prefix = subtype[0:2]
for line in r:
if line.startswith('#'): continue
if len(line.rstrip()) == 0: continue
icgc_id, id_pcawg, dcc_project_code, creation_release = str.split(line.rstrip(), ',')
id_pcawg = detect_and_low_case_uuid(id_pcawg)
annotations[type][dcc_project_code+'::'+id_pcawg] = prefix.upper()+icgc_id
elif type == 'pcawg_final_list':
annotations[type] = {
'donor': set(),
'specimen': set(),
'sample': set()
}
reader = csv.DictReader(r, delimiter='\t')
for row in reader:
annotations[type]['donor'].add(row.get('donor_unique_id'))
annotations[type]['specimen'].add(row.get('dcc_project_code')+'::'+row.get('submitter_specimen_id'))
annotations[type]['sample'].add(row.get('dcc_project_code')+'::'+row.get('submitter_sample_id'))
elif type in ['oxog_score', 'ContEST']:
annotations[type] = {}
reader = csv.DictReader(r, delimiter='\t')
for row in reader:
if not row.get('aliquot_GUUID'): continue
if type == 'oxog_score' and not row.get('picard_oxoQ'):
logger.warning('aliquot: {} has no oxog_score'.format(row.get('aliquot_GUUID')))
continue
if type == 'ContEST' and not row.get('contamination_percentage_whole_genome_no_array_value'):
logger.warning('aliquot: {} has no ContEST'.format(row.get('aliquot_GUUID')))
continue
annotations[type][row.get('aliquot_GUUID')] = row.get('picard_oxoQ') if type=='oxog_score' else row.get('contamination_percentage_whole_genome_no_array_value')
elif type == 'Stars':
annotations[type] = {}
reader = csv.DictReader(r, delimiter='\t')
for row in reader:
if not row.get('Tumour_WGS_aliquot_ID'): continue
if row.get('Stars') == 'NA':
logger.warning('aliquot:{} has no stars information.'.format(row.get('Tumour_WGS_aliquot_ID')))
continue
annotations[type][row.get('Tumour_WGS_aliquot_ID')] = row.get('Stars')
elif type == 'TiN':
annotations[type] = {}
reader = csv.DictReader(r, delimiter='\t')
for row in reader:
if not row.get('donor_unique_id'): continue
if not row.get('TiN_donor'):
logger.warning('donor:{} has no TiN information.'.format(row.get('donor_unique_id')))
continue
annotations[type][row.get('donor_unique_id')] = row.get('TiN_donor')
else:
logger.warning('unknown annotation type: {}'.format(type))
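# Enrich a single donor object in place: aggregate its BAM entries into WGS and RNA-Seq
# alignment status, set alignment/variant-calling flags, attach GNOS repo availability
# and the original GNOS assignment, apply blacklist/freeze annotations, pick and attach
# VCF and consensus entries, check for duplicated BWA alignments, and finally reshape
# minibam information.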
def process_donor(donor, annotations, vcf_entries, conf, train2_freeze_bams, consensus_entries):
logger.info( 'processing donor: {} ...'.format(donor.get('donor_unique_id')) )
# check whether all tumor specimen(s) aligned
if (donor.get('flags').get('aligned_tumor_specimen_aliquot_counts')
and donor.get('flags').get('aligned_tumor_specimen_aliquot_counts') == donor.get('flags').get('all_tumor_specimen_aliquot_counts')):
donor.get('flags')['are_all_tumor_specimens_aligned'] = True
# now build easy-to-use, specimen-level, gnos_repo-aware summary of bwa alignment status by iterating all collected bams
aggregated_bam_info = bam_aggregation(donor['bam_files'])
#print json.dumps(aggregated_bam_info, default=set_default) # debug only
# let's add this aggregated alignment information to donor object
if aggregated_bam_info.get('WGS'):
add_alignment_status_to_donor(donor, aggregated_bam_info.get('WGS'))
#print json.dumps(donor.get('tumor_alignment_status'), default=set_default) # debug only
#print (json.dumps(aggregated_bam_info.get('RNA-Seq'), default=set_default)) # debug only
if aggregated_bam_info.get('RNA-Seq'):
add_rna_seq_status_to_donor(donor, aggregated_bam_info.get('RNA-Seq'))
if donor.get('rna_seq').get('alignment').get('normal'):
aliquot = donor.get('rna_seq').get('alignment').get('normal')
if aliquot.get('tophat'):
donor.get('flags')['is_normal_tophat_rna_seq_alignment_performed'] = True
if aliquot.get('star'):
donor.get('flags')['is_normal_star_rna_seq_alignment_performed'] = True
if len(donor.get('rna_seq').get('alignment').get('tumor')) > 0:
for aliquot in donor.get('rna_seq').get('alignment').get('tumor'):
if aliquot.get('tophat'):
donor.get('flags')['is_tumor_tophat_rna_seq_alignment_performed'] = True
if aliquot.get('star'):
donor.get('flags')['is_tumor_star_rna_seq_alignment_performed'] = True
# # for debug
# if donor.get('donor_unique_id') == 'OV-AU::AOCS-141':
# print json.dumps(aggregated_bam_info.get('RNA-Seq'), default=set_default)
# print json.dumps(donor.get('rna_seq').get('alignment'), default=set_default)
# sys.exit(0)
if donor.get('normal_alignment_status') and donor.get('normal_alignment_status').get('aligned'):
donor.get('flags')['is_normal_specimen_aligned'] = True
# add gnos repos where complete alignments for the current donor are available
add_gnos_repos_with_complete_alignment_set(donor)
# add gnos repos where one alignment or all alignments for the current donor are available
add_gnos_repos_with_alignment_result(donor)
# add original gnos repo assignment, this is based on a manually maintained yaml file
add_original_gnos_repo(donor, annotations['gnos_assignment'])
if donor.get('flags').get('is_normal_specimen_aligned') and not donor.get('original_gnos_assignment'):
logger.warning('donor with normal aligned but gnos_for_originally_aligned_at is empty, please update gnos assignment annotation for donor: {} with {}'
.format(donor.get('donor_unique_id'), conf.get(donor.get('normal_alignment_status').get('aligned_bam').get('gnos_repo')[0])))
# it should be pretty safe to assign it automatically for this freshly aligned normal specimen
donor['original_gnos_assignment'] = conf.get(donor.get('normal_alignment_status').get('aligned_bam').get('gnos_repo')[0])
add_train2_donor_flag(donor, train2_freeze_bams)
add_train2_pilot_flag(donor, annotations['train2_pilot'])
add_donor_blacklist_flag(donor, annotations['donor_blacklist'])
add_manual_qc_failed_flag(donor, annotations['manual_qc_failed'])
donor.get('flags')['is_sanger_vcf_in_jamboree'] = False
if donor.get('donor_unique_id') in annotations.get('sanger_vcf_in_jamboree'):
donor.get('flags')['is_sanger_vcf_in_jamboree'] = True
donor.get('flags').get('vcf_in_jamboree').append('sanger')
# choose vcf to vcf_entries by iterating all cached vcfs
choose_vcf_entry(vcf_entries, donor.get('donor_unique_id'), annotations)
# re-organize dkfz/embl variant call results, this function does two things:
# 1. when the combined dkfz/embl call exists, remove the separate ones
# 2. create combined dkfz/embl variant call entry stub when the real combined
# one does not exist yet, but the two separate call results do exist
reorganize_dkfz_embl_calls(vcf_entries.get(donor.get('donor_unique_id')))
add_vcf_entry(donor, vcf_entries.get(donor.get('donor_unique_id')))
# choose consensus by iterating all cached vcfs
choose_consensus_entry(consensus_entries, donor.get('donor_unique_id'))
# add the consensus to donor
add_consensus_entry(donor, consensus_entries.get(donor.get('donor_unique_id')))
check_bwa_duplicates(donor, train2_freeze_bams)
reshape_minibam(donor)
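# Attach minibam entries (from the 'minibam_variant_calling' result) to the donor's
# normal and tumour WGS alignment records, matching minibam files back to the
# corresponding aligned BAM/BAI file names.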
def reshape_minibam(donor):
if not donor.get('variant_calling_results'): return
if not donor.get('normal_alignment_status') or not donor.get('tumor_alignment_status'): return
if not donor.get('normal_alignment_status').get('aligned_bam'): return
if not donor.get('variant_calling_results').get('minibam_variant_calling'): return
minibam_info = donor.get('variant_calling_results').get('minibam_variant_calling')
# add minibam to normal
wgs_normal_alignment = donor.get('normal_alignment_status').get('aligned_bam')
donor.get('normal_alignment_status')['minibam'] = create_minibam(wgs_normal_alignment, minibam_info)
# add minibam to tumour
for aliquot in donor.get('tumor_alignment_status'):
wgs_tumor_alignment = aliquot.get('aligned_bam')
aliquot['minibam'] = create_minibam(wgs_tumor_alignment, minibam_info)
return donor
def create_minibam(alignment, minibam_info):
minibam_entry = {
"gnos_id": minibam_info['gnos_id'],
"effective_xml_md5sum": minibam_info['effective_xml_md5sum'],
"gnos_last_modified": minibam_info['gnos_last_modified'],
"gnos_repo": minibam_info['gnos_repo'],
"is_santa_cruz_entry": minibam_info['is_santa_cruz_entry'],
"is_aug2015_entry": minibam_info['is_aug2015_entry'],
"is_oct2015_entry": minibam_info['is_oct2015_entry'],
"is_mar2016_entry": minibam_info['is_mar2016_entry'],
"is_may2016_entry": minibam_info['is_may2016_entry'],
"is_s3_transfer_scheduled": minibam_info['is_s3_transfer_scheduled'],
"is_s3_transfer_completed": minibam_info['is_s3_transfer_completed']
}
minibam_files = minibam_info.get('files')
if not minibam_files:
logger.warning('The minibam with gnos_id {} is missing files.'.format(minibam_entry.get('gnos_id')))
return
for ftype in ['bam', 'bai']:
for f in minibam_files:
if not f.get('file_name').replace('_minibam', '') == alignment.get(ftype+'_file_name'): continue
for feature in ['name', 'size', 'md5sum']:
minibam_entry[ftype+'_file_'+feature] = f.get('file_'+feature) if f.get('file_'+feature) else None
for feature in ['name', 'size', 'md5sum']:
if not minibam_entry.get(ftype+'_file_'+feature):
logger.warning('The minibam with gnos_id: {} is missing {} data for file {}.'.format(minibam_entry.get('gnos_id'), feature, alignment.get(ftype+'_file_name')))
return minibam_entry
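# Keep only one DKFZ/EMBL call set per donor: if a combined 'dkfz_embl_variant_calling'
# entry exists, drop the separate dkfz and embl entries; if only the two separate entries
# exist, create a combined stub entry (marked with 'is_stub') from the embl one.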
def reorganize_dkfz_embl_calls(vcf_entries):
if not vcf_entries: return
variant_call_types = set()
for key in vcf_entries:
if not key.endswith('_variant_calling'): continue
variant_call_types.add(key)
if 'dkfz_embl_variant_calling' in variant_call_types:
if vcf_entries.get('embl_variant_calling'):
logger.warning('Combined dkfz/embl call exists with gnos_id: {}, removing embl call entry with gnos_id: {}'\
.format(vcf_entries.get('dkfz_embl_variant_calling').get('gnos_id'), vcf_entries.get('embl_variant_calling').get('gnos_id')))
vcf_entries.pop('embl_variant_calling')
if vcf_entries.get('dkfz_variant_calling'):
logger.warning('Combined dkfz/embl call exists with gnos_id: {}, removing dkfz call entry with gnos_id: {}'\
.format(vcf_entries.get('dkfz_embl_variant_calling').get('gnos_id'), vcf_entries.get('dkfz_variant_calling').get('gnos_id')))
vcf_entries.pop('dkfz_variant_calling')
elif 'embl_variant_calling' in variant_call_types and 'dkfz_variant_calling' in variant_call_types:
# now create the combined dkfz_embl_variant_calling stub
vcf_entries.update({
'dkfz_embl_variant_calling': {
'gnos_id': vcf_entries.get('embl_variant_calling').get('gnos_id'),
'gnos_repo': vcf_entries.get('embl_variant_calling').get('gnos_repo'), # this is needed for reporting purpose, get it from embl
'gnos_published_date': vcf_entries.get('embl_variant_calling').get('gnos_published_date'),
'is_stub': True
}
})
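# Summarize duplicated BWA alignments for a donor. All aligned non-RNA-Seq BAMs are
# grouped by aliquot, then the normal and tumour groups are scanned for mismatches in
# gnos_id, md5sum, effective XML md5sum and BWA workflow version, and for missing
# santa_cruz/train2/Sanger-input BAMs. The result is stored under
# donor['duplicated_bwa_alignment_summary'].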
def check_bwa_duplicates(donor, train2_freeze_bams):
duplicated_bwa_alignment_summary = {
'exists_gnos_xml_mismatch': False,
'exists_gnos_xml_mismatch_in_normal': False,
'exists_gnos_xml_mismatch_in_tumor': False,
'exists_mismatch_bwa_bams': False,
'exists_mismatch_bwa_bams_in_normal': False,
'exists_mismatch_bwa_bams_in_tumor': False,
'exists_gnos_id_mismatch': False,
'exists_gnos_id_mismatch_in_normal': False,
'exists_gnos_id_mismatch_in_tumor': False,
'exists_md5sum_mismatch': False,
'exists_md5sum_mismatch_in_normal': False,
'exists_md5sum_mismatch_in_tumor': False,
'exists_version_mismatch': False,
'exists_version_mismatch_in_normal': False,
'exists_version_mismatch_in_tumor': False,
'exists_md5sum_mismatch_between_train2_marked_and_sanger_used': False,
'exists_version_mismatch_between_train2_marked_and_sanger_used': False,
'is_santa_cruz_freeze_bam_missing': False,
'is_santa_cruz_freeze_normal_bam_missing': False,
'is_santa_cruz_freeze_tumor_bam_missing': False,
'is_train2_freeze_bam_missing': False,
'is_train2_freeze_normal_bam_missing': False,
'is_train2_freeze_tumor_bam_missing': False,
'is_bam_used_by_sanger_missing': False,
'is_normal_bam_used_by_sanger_missing': False,
'is_tumor_bam_used_by_sanger_missing': False,
'normal': {},
'_tmp_tumor': {},
'tumor': []
}
aliquots = {}
duplicated_bwa = False
for bam_file in donor.get('bam_files'):
if not bam_file.get('is_aligned'): continue
        # skip RNA-Seq BAMs, only BWA-aligned WGS BAMs are considered here
if bam_file.get('library_strategy') == 'RNA-Seq': continue
if aliquots.get(bam_file.get('aliquot_id')): # exists already
duplicated_bwa = True
aliquots.get(bam_file.get('aliquot_id')).append(bam_file)
else:
aliquots[bam_file.get('aliquot_id')] = [bam_file]
if True or duplicated_bwa: # Let's do this for all donors
for aliquot in aliquots:
for bam_file in aliquots.get(aliquot):
if 'normal' in bam_file.get('dcc_specimen_type').lower():
if duplicated_bwa_alignment_summary.get('normal'):
duplicated_bwa_alignment_summary.get('normal').get('aligned_bam').append(
{
'gnos_id': bam_file.get('bam_gnos_ao_id'),
'gnos_repo': bam_file.get('gnos_repo'),
'md5sum': bam_file.get('md5sum'),
'effective_xml_md5sum': bam_file.get('effective_xml_md5sum'),
'upload_date': bam_file.get('upload_date'),
'published_date': bam_file.get('published_date'),
'last_modified': bam_file.get('last_modified'),
'bwa_workflow_version': bam_file.get('alignment').get('workflow_version'),
'is_train2_bam': is_train2_bam(donor, train2_freeze_bams, bam_file.get('bam_gnos_ao_id'), 'normal'),
'is_used_in_sanger_variant_call': is_used_in_sanger_variant_call(donor,
bam_file.get('bam_gnos_ao_id')),
'is_santa_cruz_entry': bam_file.get('is_santa_cruz_entry'),
'is_aug2015_entry': bam_file.get('is_aug2015_entry'),
'is_oct2015_entry': bam_file.get('is_oct2015_entry'),
'is_mar2016_entry': bam_file.get('is_mar2016_entry'),
'is_may2016_entry': bam_file.get('is_may2016_entry')
}
)
else:
duplicated_bwa_alignment_summary['normal'] = {
'aliquot_id': aliquot,
'dcc_specimen_type': bam_file.get('dcc_specimen_type'),
'aligned_bam': [
{
'gnos_id': bam_file.get('bam_gnos_ao_id'),
'gnos_repo': bam_file.get('gnos_repo'),
'md5sum': bam_file.get('md5sum'),
'effective_xml_md5sum': bam_file.get('effective_xml_md5sum'),
'upload_date': bam_file.get('upload_date'),
'published_date': bam_file.get('published_date'),
'last_modified': bam_file.get('last_modified'),
'bwa_workflow_version': bam_file.get('alignment').get('workflow_version'),
'is_train2_bam': is_train2_bam(donor, train2_freeze_bams, bam_file.get('bam_gnos_ao_id'), 'normal'),
'is_used_in_sanger_variant_call': is_used_in_sanger_variant_call(donor,
bam_file.get('bam_gnos_ao_id')),
'is_santa_cruz_entry': bam_file.get('is_santa_cruz_entry'),
'is_aug2015_entry': bam_file.get('is_aug2015_entry'),
'is_oct2015_entry': bam_file.get('is_oct2015_entry'),
'is_mar2016_entry': bam_file.get('is_mar2016_entry'),
'is_may2016_entry': bam_file.get('is_may2016_entry')
}
]
}
else: # tumor
if not duplicated_bwa_alignment_summary.get('_tmp_tumor').get(aliquot):
duplicated_bwa_alignment_summary.get('_tmp_tumor')[aliquot] = {
'aliquot_id': aliquot,
'dcc_specimen_type': bam_file.get('dcc_specimen_type'),
'aligned_bam': []
}
duplicated_bwa_alignment_summary.get('_tmp_tumor').get(aliquot).get('aligned_bam').append(
{
'gnos_id': bam_file.get('bam_gnos_ao_id'),
'gnos_repo': bam_file.get('gnos_repo'),
'md5sum': bam_file.get('md5sum'),
'effective_xml_md5sum': bam_file.get('effective_xml_md5sum'),
'upload_date': bam_file.get('upload_date'),
'published_date': bam_file.get('published_date'),
'last_modified': bam_file.get('last_modified'),
'bwa_workflow_version': bam_file.get('alignment').get('workflow_version'),
'is_train2_bam': is_train2_bam(donor, train2_freeze_bams, bam_file.get('bam_gnos_ao_id'), 'tumor'),
'is_used_in_sanger_variant_call': is_used_in_sanger_variant_call(donor,
bam_file.get('bam_gnos_ao_id')),
'is_santa_cruz_entry': bam_file.get('is_santa_cruz_entry'),
'is_aug2015_entry': bam_file.get('is_aug2015_entry'),
'is_oct2015_entry': bam_file.get('is_oct2015_entry'),
'is_mar2016_entry': bam_file.get('is_mar2016_entry'),
'is_may2016_entry': bam_file.get('is_may2016_entry')
}
)
for aliquot in duplicated_bwa_alignment_summary.get('_tmp_tumor'):
duplicated_bwa_alignment_summary.get('tumor').append(duplicated_bwa_alignment_summary.get('_tmp_tumor').get(aliquot))
del duplicated_bwa_alignment_summary['_tmp_tumor']
# scan normal BAMs
if duplicated_bwa_alignment_summary.get('normal'):
b_gnos_id = None
b_md5sum = None
xml_md5sum = None
b_version = None
has_santa_cruz_n_bam = False
has_train2_n_bam = False
has_sanger_n_bam = False
count_is_train2_not_sanger = 0
count_not_train2_is_sanger = 0
count_is_train2_is_sanger = 0
duplicated_bwa_alignment_summary.get('normal')['exists_mismatch_bwa_bams'] = False
duplicated_bwa_alignment_summary.get('normal')['exists_gnos_id_mismatch'] = False
duplicated_bwa_alignment_summary.get('normal')['exists_gnos_xml_mismatch'] = False
duplicated_bwa_alignment_summary.get('normal')['exists_md5sum_mismatch'] = False
duplicated_bwa_alignment_summary.get('normal')['exists_version_mismatch'] = False
for bam in duplicated_bwa_alignment_summary.get('normal').get('aligned_bam'):
is_santa_cruz_n_bam = bam.get('is_santa_cruz_entry')
if is_santa_cruz_n_bam: has_santa_cruz_n_bam = True
is_train2_n_bam = bam.get('is_train2_bam')
if is_train2_n_bam: has_train2_n_bam = True
is_sanger_n_bam = bam.get('is_used_in_sanger_variant_call')
if is_sanger_n_bam: has_sanger_n_bam = True
if is_train2_n_bam and not is_sanger_n_bam: count_is_train2_not_sanger += 1
if not is_train2_n_bam and is_sanger_n_bam: count_not_train2_is_sanger += 1
if is_train2_n_bam and is_sanger_n_bam: count_is_train2_is_sanger += 1
if not b_gnos_id: b_gnos_id = bam.get('gnos_id')
if b_gnos_id and not b_gnos_id == bam.get('gnos_id'):
duplicated_bwa_alignment_summary['exists_gnos_id_mismatch'] = True
duplicated_bwa_alignment_summary['exists_gnos_id_mismatch_in_normal'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams_in_normal'] = True
duplicated_bwa_alignment_summary.get('normal')['exists_mismatch_bwa_bams'] = True
duplicated_bwa_alignment_summary.get('normal')['exists_gnos_id_mismatch'] = True
if not b_md5sum: b_md5sum = bam.get('md5sum')
if b_md5sum and not b_md5sum == bam.get('md5sum'):
duplicated_bwa_alignment_summary['exists_md5sum_mismatch'] = True
duplicated_bwa_alignment_summary['exists_md5sum_mismatch_in_normal'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams_in_normal'] = True
duplicated_bwa_alignment_summary.get('normal')['exists_mismatch_bwa_bams'] = True
duplicated_bwa_alignment_summary.get('normal')['exists_md5sum_mismatch'] = True
if not xml_md5sum: xml_md5sum = bam.get('effective_xml_md5sum')
if xml_md5sum and not xml_md5sum == bam.get('effective_xml_md5sum'):
duplicated_bwa_alignment_summary['exists_gnos_xml_mismatch'] = True
duplicated_bwa_alignment_summary['exists_gnos_xml_mismatch_in_normal'] = True
duplicated_bwa_alignment_summary.get('normal')['exists_gnos_xml_mismatch'] = True
if not b_version: b_version = bam.get('bwa_workflow_version')
if b_version and not b_version == bam.get('bwa_workflow_version'):
duplicated_bwa_alignment_summary['exists_version_mismatch'] = True
duplicated_bwa_alignment_summary['exists_version_mismatch_in_normal'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams_in_normal'] = True
duplicated_bwa_alignment_summary.get('normal')['exists_mismatch_bwa_bams'] = True
duplicated_bwa_alignment_summary.get('normal')['exists_version_mismatch'] = True
if donor.get('flags').get('is_santa_cruz_donor') and not has_santa_cruz_n_bam:
duplicated_bwa_alignment_summary['is_santa_cruz_freeze_bam_missing'] = True
duplicated_bwa_alignment_summary['is_santa_cruz_freeze_normal_bam_missing'] = True
if donor.get('flags').get('is_train2_donor') and not has_train2_n_bam:
duplicated_bwa_alignment_summary['is_train2_freeze_bam_missing'] = True
duplicated_bwa_alignment_summary['is_train2_freeze_normal_bam_missing'] = True
if donor.get('flags').get('is_sanger_variant_calling_performed') and not has_sanger_n_bam:
duplicated_bwa_alignment_summary['is_bam_used_by_sanger_missing'] = True
duplicated_bwa_alignment_summary['is_normal_bam_used_by_sanger_missing'] = True
if donor.get('flags').get('is_train2_donor') and \
donor.get('flags').get('is_sanger_variant_calling_performed') and \
not count_is_train2_is_sanger and \
count_is_train2_not_sanger and count_not_train2_is_sanger:
if duplicated_bwa_alignment_summary['exists_md5sum_mismatch']:
duplicated_bwa_alignment_summary['exists_md5sum_mismatch_between_train2_marked_and_sanger_used'] = True
if duplicated_bwa_alignment_summary['exists_version_mismatch']:
duplicated_bwa_alignment_summary['exists_version_mismatch_between_train2_marked_and_sanger_used'] = True
# scan tumor BAMs
if duplicated_bwa_alignment_summary.get('tumor'):
for aliquot in duplicated_bwa_alignment_summary.get('tumor'):
b_gnos_id = None
b_md5sum = None
xml_md5sum = None
b_version = None
has_santa_cruz_t_bam = False
has_train2_t_bam = False
has_sanger_t_bam = False
count_is_train2_not_sanger = 0
count_not_train2_is_sanger = 0
count_is_train2_is_sanger = 0
aliquot['exists_mismatch_bwa_bams'] = False
aliquot['exists_gnos_id_mismatch'] = False
aliquot['exists_gnos_xml_mismatch'] = False
aliquot['exists_md5sum_mismatch'] = False
aliquot['exists_version_mismatch'] = False
for bam in aliquot.get('aligned_bam'):
is_santa_cruz_t_bam = bam.get('is_santa_cruz_entry')
if is_santa_cruz_t_bam: has_santa_cruz_t_bam = True
is_train2_t_bam = bam.get('is_train2_bam')
if is_train2_t_bam: has_train2_t_bam = True
is_sanger_t_bam = bam.get('is_used_in_sanger_variant_call')
if is_sanger_t_bam: has_sanger_t_bam = True
if is_train2_t_bam and not is_sanger_t_bam: count_is_train2_not_sanger += 1
if not is_train2_t_bam and is_sanger_t_bam: count_not_train2_is_sanger += 1
if is_train2_t_bam and is_sanger_t_bam: count_is_train2_is_sanger += 1
if not b_gnos_id: b_gnos_id = bam.get('gnos_id')
if b_gnos_id and not b_gnos_id == bam.get('gnos_id'):
duplicated_bwa_alignment_summary['exists_gnos_id_mismatch'] = True
duplicated_bwa_alignment_summary['exists_gnos_id_mismatch_in_tumor'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams_in_tumor'] = True
aliquot['exists_mismatch_bwa_bams'] = True
aliquot['exists_gnos_id_mismatch'] = True
if not b_md5sum: b_md5sum = bam.get('md5sum')
if b_md5sum and not b_md5sum == bam.get('md5sum'):
duplicated_bwa_alignment_summary['exists_md5sum_mismatch'] = True
duplicated_bwa_alignment_summary['exists_md5sum_mismatch_in_tumor'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams_in_tumor'] = True
aliquot['exists_mismatch_bwa_bams'] = True
aliquot['exists_md5sum_mismatch'] = True
if not xml_md5sum: xml_md5sum = bam.get('effective_xml_md5sum')
if xml_md5sum and not xml_md5sum == bam.get('effective_xml_md5sum'):
duplicated_bwa_alignment_summary['exists_gnos_xml_mismatch'] = True
duplicated_bwa_alignment_summary['exists_gnos_xml_mismatch_in_tumor'] = True
aliquot['exists_gnos_xml_mismatch'] = True
if not b_version: b_version = bam.get('bwa_workflow_version')
if b_version and not b_version == bam.get('bwa_workflow_version'):
duplicated_bwa_alignment_summary['exists_version_mismatch'] = True
duplicated_bwa_alignment_summary['exists_version_mismatch_in_tumor'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams_in_tumor'] = True
aliquot['exists_version_mismatch'] = True
aliquot['exists_mismatch_bwa_bams'] = True
if donor.get('flags').get('is_santa_cruz_donor') and not has_santa_cruz_t_bam:
duplicated_bwa_alignment_summary['is_santa_cruz_freeze_bam_missing'] = True
duplicated_bwa_alignment_summary['is_santa_cruz_freeze_tumor_bam_missing'] = True
if donor.get('flags').get('is_train2_donor') and not has_train2_t_bam:
duplicated_bwa_alignment_summary['is_train2_freeze_bam_missing'] = True
duplicated_bwa_alignment_summary['is_train2_freeze_tumor_bam_missing'] = True
if donor.get('flags').get('is_sanger_variant_calling_performed') and not has_sanger_t_bam:
duplicated_bwa_alignment_summary['is_bam_used_by_sanger_missing'] = True
duplicated_bwa_alignment_summary['is_tumor_bam_used_by_sanger_missing'] = True
if donor.get('flags').get('is_train2_donor') and \
donor.get('flags').get('is_sanger_variant_calling_performed') and \
not count_is_train2_is_sanger and \
count_is_train2_not_sanger and count_not_train2_is_sanger:
if duplicated_bwa_alignment_summary['exists_md5sum_mismatch']:
duplicated_bwa_alignment_summary['exists_md5sum_mismatch_between_train2_marked_and_sanger_used'] = True
if duplicated_bwa_alignment_summary['exists_version_mismatch']:
duplicated_bwa_alignment_summary['exists_version_mismatch_between_train2_marked_and_sanger_used'] = True
donor['duplicated_bwa_alignment_summary'] = duplicated_bwa_alignment_summary
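# True if the given gnos_id appears among the input BAMs of the donor's Sanger variant
# calling run.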
def is_used_in_sanger_variant_call(donor, gnos_id):
if donor.get('variant_calling_results') and donor.get('variant_calling_results').get('sanger_variant_calling'):
for input_gnos_entry in donor.get('variant_calling_results').get('sanger_variant_calling') \
.get('workflow_details').get('variant_pipeline_input_info'):
if gnos_id == input_gnos_entry.get('attributes').get('analysis_id'): return True
return False
def is_train2_bam(donor, train2_freeze_bams, gnos_id, specimen_type):
if donor.get('donor_unique_id') and train2_freeze_bams.get(donor.get('donor_unique_id')) \
and train2_freeze_bams.get(donor.get('donor_unique_id')).get(gnos_id):
if not specimen_type == train2_freeze_bams.get(donor.get('donor_unique_id')).get(gnos_id).get('specimen_type'):
logger.warning('This should never happen: specimen type mismatch in train2 list in donor {}'
.format(donor.get('donor_unique_id')))
return True
return False
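# Copy a chosen consensus entry onto the donor: files go to donor['consensus_files'],
# and indel/snv_mnv/sv/cnv call sets go under donor['consensus_somatic_variant_calls'].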
def add_consensus_entry(donor, consensus_entry):
if not consensus_entry:
return
donor['consensus_files'] = copy.deepcopy(consensus_entry.get('consensus_entry_files'))
del consensus_entry['consensus_entry_files']
for ct in ['somatic']:
if not donor.get('consensus_'+ct+'_variant_calls'): donor['consensus_'+ct+'_variant_calls'] = {}
for k, v in consensus_entry.iteritems():
if k in ['indel', 'snv_mnv', 'sv', 'cnv']:
donor.get('consensus_somatic_variant_calls').update({k: v})
return donor
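# Attach variant calling results to the donor and derive per-workflow QC flags:
# output/tumour-specimen count mismatches, missing input BAMs (normal/tumour),
# file-prefix mismatches and XML md5sum mismatches. Also sets the
# is_<workflow>_variant_calling_performed flags; 'broad' is only considered performed
# when all of its file subsets (broad, muse, broad_tar) exist and are consistent.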
def add_vcf_entry(donor, vcf_entry):
if not vcf_entry:
return
if not donor.get('variant_calling_results'): donor['variant_calling_results'] = {}
donor['vcf_files'] = copy.deepcopy(vcf_entry.get('vcf_entry_files'))
del vcf_entry['vcf_entry_files']
donor.get('variant_calling_results').update(vcf_entry)
# update the flags inside each vcf
for workflow in ['sanger', 'embl', 'dkfz', 'dkfz_embl', 'broad', 'muse', 'broad_tar']:
if donor.get('variant_calling_results').get(workflow + '_variant_calling'):
# if this is a stub for dkfz_embl call, skip the rest
if workflow == 'dkfz_embl' and donor.get('variant_calling_results').get(workflow + '_variant_calling').get('is_stub'): continue
# add code to handle 'DKFZ_EMBL_Merged' workflow: use the dkfz output as the merged_workflow output
if workflow == 'dkfz_embl' and \
donor.get('variant_calling_results').get(workflow + '_variant_calling').get('workflow_details').get('variant_workflow_name') == 'DKFZ_EMBL_Merged':
vcf_output_list = donor.get('variant_calling_results').get(workflow + '_variant_calling').get('workflow_details').get('variant_pipeline_output_info').get('dkfz').get('workflow_outputs')
else:
vcf_output_list = donor.get('variant_calling_results').get(workflow + '_variant_calling').get('workflow_details').get('variant_pipeline_output_info')
if not donor.get('flags').get('all_tumor_specimen_aliquot_counts') + 1 == len(vcf_output_list):
logger.warning(workflow + ' variant calling workflow may have missed tumour specimen for donor: {}'
.format(donor.get('donor_unique_id')))
donor.get('variant_calling_results').get(workflow + '_variant_calling')['is_output_and_tumour_specimen_counts_mismatch'] = True
else:
donor.get('variant_calling_results').get(workflow + '_variant_calling')['is_output_and_tumour_specimen_counts_mismatch'] = False
# add the flags of is_bam_used_by_{{workflow}}_missing, is_normal_bam_used_by_{{workflow}}_missing, is_tumor_bam_used_by_{{workflow}}_missing
donor.get('variant_calling_results').get(workflow + '_variant_calling')['is_bam_used_by_' + workflow + '_missing'] = False
donor.get('variant_calling_results').get(workflow + '_variant_calling')['is_normal_bam_used_by_' + workflow + '_missing'] = False
donor.get('variant_calling_results').get(workflow + '_variant_calling')['is_tumor_bam_used_by_' + workflow + '_missing'] = False
has_n_bam = False
vcf_input_t_bam = set()
tumor_alignment_bam = set()
# scan all the vcf input under "variant_pipeline_input_info"
for vcf_input in donor.get('variant_calling_results').get(workflow + '_variant_calling').get('workflow_details').get('variant_pipeline_input_info'):
if 'normal' in vcf_input.get('attributes').get('dcc_specimen_type').lower():
# added more checks to avoid key not exist error
if donor.get('normal_alignment_status') and \
donor.get('normal_alignment_status').get('aligned_bam') and \
donor.get('normal_alignment_status').get('aligned_bam').get('gnos_id') and \
vcf_input.get('attributes').get('analysis_id') == donor.get('normal_alignment_status').get('aligned_bam').get('gnos_id'): #check normal alignment
has_n_bam = True
elif 'tumour' in vcf_input.get('attributes').get('dcc_specimen_type').lower(): # check the tumor
vcf_input_t_bam.add((vcf_input.get('specimen'), vcf_input.get('attributes').get('analysis_id')))
else:
logger.warning('invalid specimen type: {} in donor: {} with aliquot_id: {}'
.format(vcf_input.get('attributes').get('dcc_specimen_type'), donor.get('donor_unique_id'), vcf_input.get('specimen'))
)
# scan all the bams in tumor_alignment_status
if donor.get('tumor_alignment_status'):
for tumor_alignment in donor.get('tumor_alignment_status'):
if tumor_alignment.get('aligned_bam') and tumor_alignment.get('aligned_bam').get('gnos_id'): # avoid key not exist error
tumor_alignment_bam.add((tumor_alignment.get('aliquot_id'), tumor_alignment.get('aligned_bam').get('gnos_id')))
if not has_n_bam:
donor.get('variant_calling_results').get(workflow + '_variant_calling')['is_normal_bam_used_by_' + workflow + '_missing'] = True
donor.get('variant_calling_results').get(workflow + '_variant_calling')['is_bam_used_by_' + workflow + '_missing'] = True
donor.get('flags')['is_bam_used_by_variant_calling_missing'] = True
if vcf_input_t_bam != tumor_alignment_bam:
donor.get('variant_calling_results').get(workflow + '_variant_calling')['is_tumor_bam_used_by_' + workflow + '_missing'] = True
donor.get('variant_calling_results').get(workflow + '_variant_calling')['is_bam_used_by_' + workflow + '_missing'] = True
donor.get('flags')['is_bam_used_by_variant_calling_missing'] = True
# add the flags of exists_{workflow}_file_prefix_mismatch
donor.get('variant_calling_results').get(workflow + '_variant_calling')['exists_' + workflow + '_file_prefix_mismatch'] = False
# update the flags of exists_xml_md5sum_mismatch
if donor.get('variant_calling_results').get(workflow + '_variant_calling').get('exists_xml_md5sum_mismatch'):
donor.get('flags')['exists_xml_md5sum_mismatch'] = True
# scan all the files under **_variant_calling
file_prefix = set()
for f in donor.get('variant_calling_results').get(workflow + '_variant_calling').get('files'):
file_prefix.add(f.get('file_name').split('.')[0])
if not file_prefix == donor.get('all_tumor_specimen_aliquots'):
donor.get('variant_calling_results').get(workflow + '_variant_calling')['exists_' + workflow + '_file_prefix_mismatch'] = True
donor.get('flags')['exists_vcf_file_prefix_mismatch'] = True
# update the flags for sanger, dkfz_embl
for workflow in ['sanger', 'dkfz_embl', 'embl', 'dkfz', 'oxog', 'minibam']:
if donor.get('variant_calling_results').get(workflow + '_variant_calling'):
donor.get('flags')['is_' + workflow + '_variant_calling_performed'] = True
donor.get('flags').get('variant_calling_performed').append(workflow)
#one combined flag for broad to indicate whether broad is performed well
#donor.get('flags')['exists_mismatch_broad_file_subsets'] = False
is_broad_file_subset_missing = False
broad_file_subsets = set()
for workflow in ['broad', 'muse', 'broad_tar']:
if donor.get('variant_calling_results').get(workflow + '_variant_calling'):
donor.get('flags').get('broad')[workflow+'_file_subset_exist'] = True
vcf = donor.get('variant_calling_results').get(workflow + '_variant_calling')
if not vcf.get('workflow_details') or not vcf.get('workflow_details').get('related_file_subset_uuids') or not vcf.get('gnos_id'):
logger.warning('{} variant calling information for donor: {} is not completely populated'.format(workflow.upper(), donor.get('donor_unique_id')))
else:
current_broad_file_subsets = set(vcf.get('workflow_details').get('related_file_subset_uuids')) | set([vcf.get('gnos_id')])
if not broad_file_subsets: broad_file_subsets = current_broad_file_subsets
if broad_file_subsets and not current_broad_file_subsets == broad_file_subsets:
donor.get('flags').get('broad')['exist_file_subsets_mismatch'] = True
else:
is_broad_file_subset_missing = True
if not is_broad_file_subset_missing and not donor.get('flags').get('broad')['exist_file_subsets_mismatch']:
donor.get('flags')['is_broad_variant_calling_performed'] = True
donor.get('flags').get('variant_calling_performed').append('broad')
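# Record which GNOS repo the donor was originally assigned to, looked up first by
# donor_unique_id and then by dcc_project_code in the manually maintained
# 'gnos_assignment' annotation; the old 'gnos_repo' field is removed.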
def add_original_gnos_repo(donor, annotation):
if donor.get('gnos_repo'):
del donor['gnos_repo'] # get rid of this rather confusing old flag
if annotation.get(donor.get('donor_unique_id')):
donor['original_gnos_assignment'] = annotation.get(donor.get('donor_unique_id'))
elif annotation.get(donor.get('dcc_project_code')):
donor['original_gnos_assignment'] = annotation.get(donor.get('dcc_project_code'))
else:
donor['original_gnos_assignment'] = None
def add_train2_donor_flag(donor, train2_freeze_bams):
if train2_freeze_bams.get(donor.get('donor_unique_id')):
donor.get('flags')['is_train2_donor'] = True
else:
donor.get('flags')['is_train2_donor'] = False
def add_train2_pilot_flag(donor, annotation):
if donor.get('donor_unique_id') in annotation:
donor.get('flags')['is_train2_pilot'] = True
else:
donor.get('flags')['is_train2_pilot'] = False
def add_donor_blacklist_flag(donor, annotation):
if donor.get('donor_unique_id') in annotation:
donor.get('flags')['is_donor_blacklisted'] = True
else:
donor.get('flags')['is_donor_blacklisted'] = False
def add_manual_qc_failed_flag(donor, annotation):
if donor.get('donor_unique_id') in annotation:
donor.get('flags')['is_manual_qc_failed'] = True
else:
donor.get('flags')['is_manual_qc_failed'] = False
def add_gnos_repos_with_alignment_result(donor):
repos = set()
if (donor.get('normal_alignment_status')
and donor.get('normal_alignment_status').get('aligned_bam')):
repos = set(donor.get('normal_alignment_status').get('aligned_bam').get('gnos_repo'))
if donor.get('tumor_alignment_status'):
for t in donor.get('tumor_alignment_status'):
if t.get('aligned_bam'):
repos.update(set(t.get('aligned_bam').get('gnos_repo')))
donor['gnos_repos_with_alignment_result'] = repos
def add_gnos_repos_with_complete_alignment_set(donor):
repos = set()
if (donor.get('normal_alignment_status')
and donor.get('normal_alignment_status').get('aligned_bam')):
repos = set(donor.get('normal_alignment_status').get('aligned_bam').get('gnos_repo'))
if repos and donor.get('tumor_alignment_status'):
for t in donor.get('tumor_alignment_status'):
if t.get('aligned_bam'):
repos = set.intersection(repos, set(t.get('aligned_bam').get('gnos_repo')))
else:
repos = set()
else:
repos = set()
donor['gnos_repos_with_complete_alignment_set'] = repos
'''
# this flag is not entirely accurate, disable it for now
if repos:
donor['is_alignment_completed'] = True
else:
donor['is_alignment_completed'] = False
'''
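# Copy aggregated RNA-Seq alignment status (per aliquot, split into tophat/star) into
# donor['rna_seq']['alignment']: one 'normal' entry and a list of 'tumor' entries,
# propagating any effective XML md5sum mismatch to the donor-level flag.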
def add_rna_seq_status_to_donor(donor, aggregated_bam_info):
for aliquot_id in aggregated_bam_info.keys():
alignment_status = aggregated_bam_info.get(aliquot_id)
if (alignment_status.get('tophat') and 'normal' in alignment_status.get('tophat').get('dcc_specimen_type').lower()) or \
(alignment_status.get('star') and 'normal' in alignment_status.get('star').get('dcc_specimen_type').lower()): # normal specimen
if not donor.get('rna_seq').get('alignment').get('normal'): #no normal yet in RNA-Seq alignment of this donor
donor.get('rna_seq').get('alignment')['normal'] = alignment_status
if alignment_status.get('tophat') and alignment_status.get('tophat').get('exists_xml_md5sum_mismatch') or \
alignment_status.get('star') and alignment_status.get('star').get('exists_xml_md5sum_mismatch'):
donor.get('flags')['exists_xml_md5sum_mismatch'] = True
else:
logger.warning('more than one RNA-Seq normal aliquot found in donor: {}'.format(donor.get('donor_unique_id')))
elif (alignment_status.get('tophat') and 'tumour' in alignment_status.get('tophat').get('dcc_specimen_type').lower()) or \
(alignment_status.get('star') and 'tumour' in alignment_status.get('star').get('dcc_specimen_type').lower()):
if not donor.get('rna_seq').get('alignment').get('tumor'): #no tumor yet in RNA-Seq alignment of this donor
donor.get('rna_seq').get('alignment')['tumor'] = []
donor.get('rna_seq').get('alignment')['tumor'].append(copy.deepcopy(alignment_status))
if alignment_status.get('tophat') and alignment_status.get('tophat').get('exists_xml_md5sum_mismatch') or \
alignment_status.get('star') and alignment_status.get('star').get('exists_xml_md5sum_mismatch'):
donor.get('flags')['exists_xml_md5sum_mismatch'] = True
else:
logger.warning('invalid aliquot_id: {} in donor: {} '
.format(aliquot_id, donor.get('donor_unique_id'))
)
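# Copy aggregated WGS alignment status into the donor: a single 'normal_alignment_status'
# entry and a 'tumor_alignment_status' list, warning on extra normals, duplicated
# submitter_sample_ids among tumours, and unexpected specimen types.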
def add_alignment_status_to_donor(donor, aggregated_bam_info):
for aliquot_id in aggregated_bam_info.keys():
alignment_status = aggregated_bam_info.get(aliquot_id)
if 'normal' in alignment_status.get('dcc_specimen_type').lower(): # normal specimen
if not donor.get('normal_alignment_status'): # no normal yet in this donor, this is good
donor['normal_alignment_status'] = reorganize_unaligned_bam_info(alignment_status)
if alignment_status.get('exists_xml_md5sum_mismatch'):
donor.get('flags')['exists_xml_md5sum_mismatch'] = True
else: # another normal with different aliquot_id! this is no good
logger.warning('donor: {} has more than one normal, in use aliquot_id: {}, additional aliquot_id found: {}'
.format(donor.get('donor_unique_id'),
donor.get('normal_alignment_status').get('aliquot_id'),
aliquot_id)
)
elif 'tumour' in alignment_status.get('dcc_specimen_type').lower(): # tumour specimen
if not donor.get('tumor_alignment_status'):
donor['tumor_alignment_status'] = []
_tmp_sample_id = []
donor['tumor_alignment_status'].append(reorganize_unaligned_bam_info(alignment_status))
if alignment_status.get('exists_xml_md5sum_mismatch'):
donor.get('flags')['exists_xml_md5sum_mismatch'] = True
if alignment_status.get('submitter_sample_id') not in _tmp_sample_id:
_tmp_sample_id.append(alignment_status.get('submitter_sample_id'))
else:
index = _tmp_sample_id.index(alignment_status.get('submitter_sample_id'))
logger.warning('donor: {} has more than one aliquot_ids in tumour with the same submitter_sample_id: {}, one aliquot_id: {}, additional aliquot_id found: {}'
.format(donor.get('donor_unique_id'),
alignment_status.get('submitter_sample_id'),
donor.get('tumor_alignment_status')[index].get('aliquot_id'),
aliquot_id))
else:
logger.warning('invalid specimen type: {} in donor: {} with aliquot_id: {}'
.format(alignment_status.get('dcc_specimen_type'), donor.get('donor_unique_id'), aliquot_id)
)
def update_lane_count_flags(alignment_status):
if len(alignment_status.get('lane_count')) == 1:
alignment_status['do_lane_counts_in_every_bam_entry_match'] = True
if str(len(alignment_status.get('unaligned_bams'))) in alignment_status.get('lane_count'):
alignment_status['do_lane_count_and_bam_count_match'] = True
return alignment_status
def reorganize_unaligned_bam_info(alignment_status):
unaligned_bams = []
for gnos_id in alignment_status.get('unaligned_bams').keys():
unaligned_bams.append(
{
"gnos_id": gnos_id,
"bam_file_name": alignment_status.get('unaligned_bams').get(gnos_id).get('bam_file_name'),
"md5sum": alignment_status.get('unaligned_bams').get(gnos_id).get('md5sum'),
"gnos_repo": alignment_status.get('unaligned_bams').get(gnos_id).get('gnos_repo'),
}
)
alignment_status['unaligned_bams'] = unaligned_bams
update_lane_count_flags(alignment_status)
return alignment_status
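# Build the initial per-aliquot aggregation record for an aligned WGS BAM, including
# specimen/sample identifiers, the aligned_bam block (with gnos_repo, last_modified,
# published_date and effective_xml_md5sum kept as lists so duplicates can be appended),
# placeholders for unmappable-reads and unaligned BAMs, and the oxog/ContEST/Stars
# QC values.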
def create_aggregated_bam_info_dict(bam):
aggregated_bam_info_dict = {
"aliquot_id": bam['aliquot_id'],
"submitter_specimen_id": bam['submitter_specimen_id'],
"submitter_sample_id": bam['submitter_sample_id'],
"dcc_specimen_type": bam['dcc_specimen_type'],
"icgc_specimen_id": bam['icgc_specimen_id'],
"icgc_sample_id": bam['icgc_sample_id'],
"aligned": True,
"lane_count": set(),
"do_lane_counts_in_every_bam_entry_match": False,
"do_lane_count_and_bam_count_match": False,
"exist_specimen_type_mismatch": False,
"exist_aligned_bam_specimen_type_mismatch": False,
"exist_unaligned_bam_specimen_type_mismatch": False,
"exist_bam_with_unmappable_reads_specimen_type_mismatch": False,
"exists_xml_md5sum_mismatch": False,
"aligned_bam": {
"gnos_id": bam['bam_gnos_ao_id'],
"bam_file_name": bam['bam_file_name'],
"bam_file_size": bam['bam_file_size'],
"bam_file_md5sum": bam['md5sum'],
"bai_file_name": bam['bai_file_name'],
"bai_file_size": bam['bai_file_size'],
"bai_file_md5sum": bam['bai_file_md5sum'],
"effective_xml_md5sum": [bam['effective_xml_md5sum']],
"gnos_last_modified": [bam['last_modified']],
"gnos_published_date": [bam['published_date']],
"gnos_repo": [bam['gnos_repo']],
"is_santa_cruz_entry": bam['is_santa_cruz_entry'],
"is_aug2015_entry": bam['is_aug2015_entry'],
"is_oct2015_entry": bam['is_oct2015_entry'],
"is_mar2016_entry": bam['is_mar2016_entry'],
"is_may2016_entry": bam['is_may2016_entry'],
"is_s3_transfer_scheduled": bam['is_s3_transfer_scheduled'],
"is_s3_transfer_completed": bam['is_s3_transfer_completed']
},
"bam_with_unmappable_reads": {},
"unaligned_bams": {},
"oxog_score": bam['oxog_score'],
"ContEST": bam['ContEST'],
"Stars": bam['Stars']
}
return aggregated_bam_info_dict
def compare_specimen_type(specimen_type_A, specimen_type_B):
    return (('normal' in specimen_type_A.lower() and 'normal' in specimen_type_B.lower()) or
            ('tumour' in specimen_type_A.lower() and 'tumour' in specimen_type_B.lower()))
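# Aggregate a donor's BAM entries per aliquot. WGS BAMs are processed in three passes:
# (1) specimen-level aligned BAMs, where duplicate entries for the same aliquot are
#     resolved by preferring later freezes (may2016 > mar2016 > oct2015 > S3-scheduled >
#     aug2015 > santa_cruz),
# (2) 'unmapped reads after BWA alignment' BAMs, and
# (3) the original unaligned lane-level BAMs.
# RNA-Seq aligned BAMs are then aggregated separately per workflow (tophat/star) using
# the same duplicate-resolution order.
# Returns {'WGS': {aliquot_id: status}, 'RNA-Seq': {aliquot_id: {workflow: status}}}.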
def bam_aggregation(bam_files):
aggregated_bam_info_new = {}
if not aggregated_bam_info_new.get('WGS'):
aggregated_bam_info_new['WGS'] = {}
aggregated_bam_info = {}
for bam in bam_files: # check aligned BAM(s) first
if not bam['bam_type'] == 'Specimen level aligned BAM':
continue
if not aggregated_bam_info.get(bam['aliquot_id']): # new aliquot
aggregated_bam_info[bam['aliquot_id']] = create_aggregated_bam_info_dict(bam)
else:
alignment_status = aggregated_bam_info.get(bam['aliquot_id'])
if not compare_specimen_type(alignment_status.get('dcc_specimen_type'), bam['dcc_specimen_type']):
alignment_status['exist_aligned_bam_specimen_type_mismatch'] = True
alignment_status['exist_specimen_type_mismatch'] = True
if alignment_status.get('aligned_bam').get('gnos_id') == bam['bam_gnos_ao_id']:
if bam['gnos_repo'] in alignment_status.get('aligned_bam').get('gnos_repo'):
logger.warning( 'Same aliquot: {}, same GNOS ID: {} in the same GNOS repo: {} more than once. This should never be possible.'
.format(
bam['aliquot_id'],
alignment_status.get('aligned_bam').get('gnos_id'),
bam['gnos_repo'])
)
else:
alignment_status.get('aligned_bam').get('gnos_repo').append(bam['gnos_repo'])
alignment_status.get('aligned_bam').get('gnos_last_modified').append(bam['last_modified'])
alignment_status.get('aligned_bam').get('gnos_published_date').append(bam['published_date'])
alignment_status.get('aligned_bam').get('effective_xml_md5sum').append(bam['effective_xml_md5sum'])
alignment_status['exists_xml_md5sum_mismatch'] = False if len(set(alignment_status.get('aligned_bam').get('effective_xml_md5sum'))) == 1 else True
else:
if bam['is_may2016_entry']:
aggregated_bam_info[bam['aliquot_id']] = create_aggregated_bam_info_dict(bam)
logger.info( 'Same aliquot: {} from donor: {} has different aligned GNOS BWA BAM entries, keep the one in may2016: {}, additional: {}'
.format(bam['aliquot_id'], bam['donor_unique_id'], bam['gnos_metadata_url'], alignment_status.get('aligned_bam').get('gnos_id')))
elif bam['is_mar2016_entry']:
aggregated_bam_info[bam['aliquot_id']] = create_aggregated_bam_info_dict(bam)
logger.info( 'Same aliquot: {} from donor: {} has different aligned GNOS BWA BAM entries, keep the one in mar2016: {}, additional: {}'
.format(bam['aliquot_id'], bam['donor_unique_id'], bam['gnos_metadata_url'], alignment_status.get('aligned_bam').get('gnos_id')))
elif bam['is_oct2015_entry']:
aggregated_bam_info[bam['aliquot_id']] = create_aggregated_bam_info_dict(bam)
logger.info( 'Same aliquot: {} from donor: {} has different aligned GNOS BWA BAM entries, keep the one in oct2015: {}, additional: {}'
.format(bam['aliquot_id'], bam['donor_unique_id'], bam['gnos_metadata_url'], alignment_status.get('aligned_bam').get('gnos_id')))
elif bam['is_s3_transfer_scheduled']:
aggregated_bam_info[bam['aliquot_id']] = create_aggregated_bam_info_dict(bam)
logger.info( 'Same aliquot: {} from donor: {} has different aligned GNOS BWA BAM entries, keep the one scheduled for S3 transfer: {}, additional: {}'
.format(bam['aliquot_id'], bam['donor_unique_id'], bam['gnos_metadata_url'], alignment_status.get('aligned_bam').get('gnos_id')))
elif bam['is_aug2015_entry']:
aggregated_bam_info[bam['aliquot_id']] = create_aggregated_bam_info_dict(bam)
logger.info( 'Same aliquot: {} from donor: {} has different aligned GNOS BWA BAM entries, keep the one in aug2015: {}, additional: {}'
.format(bam['aliquot_id'], bam['donor_unique_id'], bam['gnos_metadata_url'], alignment_status.get('aligned_bam').get('gnos_id')))
elif bam['is_santa_cruz_entry']:
aggregated_bam_info[bam['aliquot_id']] = create_aggregated_bam_info_dict(bam)
logger.info( 'Same aliquot: {} from donor: {} has different aligned GNOS BWA BAM entries, keep the one in santa_cruz: {}, additional: {}'
.format(bam['aliquot_id'], bam['donor_unique_id'], bam['gnos_metadata_url'], alignment_status.get('aligned_bam').get('gnos_id')))
else:
logger.warning( 'Same aliquot: {} from donor: {} has different aligned GNOS BWA BAM entries, in use: {}, additional: {}'
.format(
bam['aliquot_id'],
bam['donor_unique_id'],
alignment_status.get('aligned_bam').get('gnos_id'),
bam['gnos_metadata_url'])
)
sort_repos_by_time(aggregated_bam_info)
for bam in bam_files: # now check BAM with unmappable reads that were derived from aligned BAM
if not bam['bam_type'] == 'Specimen level unmapped reads after BWA alignment':
continue
        if not aggregated_bam_info.get(bam['aliquot_id']): # orphaned unmapped-reads BAM: the main aligned BAM entry for this aliquot is missing
logger.warning('aliquot: {} has GNOS BAM entry for unmapped reads found: {}, however the main aligned BAM entry is missing'
.format(bam['aliquot_id'], bam['bam_gnos_ao_id'])
)
else:
alignment_status = aggregated_bam_info.get(bam['aliquot_id'])
if not compare_specimen_type(alignment_status.get('dcc_specimen_type'), bam['dcc_specimen_type']):
alignment_status['exist_bam_with_unmappable_reads_specimen_type_mismatch'] = True
alignment_status['exist_specimen_type_mismatch'] = True
if not alignment_status.get('bam_with_unmappable_reads'):
alignment_status['bam_with_unmappable_reads'] = {
"gnos_id": bam['bam_gnos_ao_id'],
"bam_file_name": bam['bam_file_name'],
"bam_file_size": bam['bam_file_size'],
"gnos_repo": set([bam['gnos_repo']])
}
elif alignment_status.get('bam_with_unmappable_reads').get('gnos_id') == bam['bam_gnos_ao_id']:
alignment_status.get('bam_with_unmappable_reads').get('gnos_repo').add(bam['gnos_repo'])
else:
logger.warning( 'same aliquot: {} has different unmappable reads GNOS BWA BAM entries, in use: {}, additional: {}'
.format(
bam['aliquot_id'],
alignment_status.get('bam_with_unmappable_reads').get('gnos_id'),
bam['bam_gnos_ao_id'])
)
for bam in bam_files: # last check original (submitted) unaligned BAM(s)
if not bam['bam_type'] == 'Unaligned BAM':
continue
if not aggregated_bam_info.get(bam['aliquot_id']): # new aliquot with no aligned BAM yet
aggregated_bam_info[bam['aliquot_id']] = {
"aliquot_id": bam['aliquot_id'],
"submitter_specimen_id": bam['submitter_specimen_id'],
"submitter_sample_id": bam['submitter_sample_id'],
"dcc_specimen_type": bam['dcc_specimen_type'],
"aligned": False,
"lane_count": set([bam['total_lanes']]),
"do_lane_counts_in_every_bam_entry_match": False,
"do_lane_count_and_bam_count_match": False,
"exist_specimen_type_mismatch": False,
"exist_aligned_bam_specimen_type_mismatch": False,
"exist_unaligned_bam_specimen_type_mismatch": False,
"exist_bam_with_unmappable_reads_specimen_type_mismatch": False,
"aligned_bam": {},
"bam_with_unmappable_reads": {},
"unaligned_bams": {
bam['bam_gnos_ao_id']: {
"bam_file_name": bam['bam_file_name'],
"md5sum": bam['md5sum'],
"gnos_repo": set([bam['gnos_repo']])
}
}
}
else: # aliquot already exists
alignment_status = aggregated_bam_info.get(bam['aliquot_id'])
alignment_status.get('lane_count').add(bam['total_lanes'])
if not compare_specimen_type(alignment_status.get('dcc_specimen_type'), bam['dcc_specimen_type']):
alignment_status['exist_unaligned_bam_specimen_type_mismatch'] = True
alignment_status['exist_specimen_type_mismatch'] = True
if alignment_status.get('unaligned_bams').get(bam['bam_gnos_ao_id']): # this unaligned bam was encountered before
if alignment_status.get('unaligned_bams').get(bam['bam_gnos_ao_id']).get('md5sum') == bam['md5sum']: # this unaligned bam has the same md5sum with encountered one
alignment_status.get('unaligned_bams').get(bam['bam_gnos_ao_id']).get('gnos_repo').add(bam['gnos_repo'])
else:
                    logger.warning( 'Unaligned lane-level BAMs with same gnos_id: {} have different md5sum, in use entry at gnos repo(s): {}, additional entry at gnos repo: {}'
                        .format(
                            bam['bam_gnos_ao_id'],
                            ', '.join(alignment_status.get('unaligned_bams').get(bam['bam_gnos_ao_id']).get('gnos_repo')),  # gnos_repo is a set here, so it cannot be indexed
                            bam['gnos_repo'])
                        )
else:
alignment_status.get('unaligned_bams')[bam['bam_gnos_ao_id']] = {
"bam_file_name": bam['bam_file_name'],
"md5sum": bam['md5sum'],
"gnos_repo": set([bam['gnos_repo']])
}
aggregated_bam_info_new['WGS'] = aggregated_bam_info
aggregated_bam_info = {}
if not aggregated_bam_info_new.get('RNA-Seq'):
aggregated_bam_info_new['RNA-Seq'] = {}
for bam in bam_files: #check RNA-Seq BAMs
if not bam['bam_type'] == 'RNA-Seq aligned BAM':
continue
if not aggregated_bam_info.get(bam['aliquot_id']): # new aliquot with RNA-Seq BAM
aggregated_bam_info[bam['aliquot_id']] = {}
aliquot_tmp = create_aggregated_rna_bam_info(bam)
if 'tophat' in bam.get('alignment').get('workflow_name').lower():
aggregated_bam_info.get(bam['aliquot_id'])['tophat'] = aliquot_tmp
elif 'star' in bam.get('alignment').get('workflow_name').lower():
aggregated_bam_info.get(bam['aliquot_id'])['star'] = aliquot_tmp
else: # other unknown alignment workflows
logger.warning('unknown RNA-Seq alignment workflows: {}'
.format(bam.get('alignment').get('workflow_name') ))
                continue  # skip this BAM; returning here would abort the whole aggregation and hand None back to the caller
else: #aliquot already exists
alignment_status = aggregated_bam_info.get(bam['aliquot_id'])
if 'tophat' in bam.get('alignment').get('workflow_name').lower():
if not alignment_status.get('tophat'): # no tophat workflow for the aliquot
aliquot_tmp = create_aggregated_rna_bam_info(bam)
alignment_status['tophat'] = aliquot_tmp
elif alignment_status.get('tophat').get('aligned_bam').get('gnos_id') == bam['bam_gnos_ao_id']:
if bam['gnos_repo'] in alignment_status.get('tophat').get('aligned_bam').get('gnos_repo'):
logger.warning( 'Same aliquot: {}, same workflow: {}, same GNOS ID: {} in the same GNOS repo: {} more than once. This should never be possible.'
.format(
bam['aliquot_id'],
bam.get('alignment').get('workflow_name'),
alignment_status.get('tophat').get('aligned_bam').get('gnos_id'),
bam['gnos_repo'])
)
else:
alignment_status.get('tophat').get('aligned_bam').get('gnos_repo').append(bam['gnos_repo'])
alignment_status.get('tophat').get('aligned_bam').get('gnos_last_modified').append(bam['last_modified'])
alignment_status.get('tophat').get('aligned_bam').get('gnos_published_date').append(bam['published_date'])
alignment_status.get('tophat').get('aligned_bam').get('effective_xml_md5sum').append(bam['effective_xml_md5sum'])
alignment_status.get('tophat')['exists_xml_md5sum_mismatch'] = False if len(set(alignment_status.get('tophat').get('aligned_bam').get('effective_xml_md5sum'))) == 1 else True
else:
if bam['is_may2016_entry']:
aliquot_tmp = create_aggregated_rna_bam_info(bam)
alignment_status['tophat'] = aliquot_tmp
logger.info( 'Same aliquot: {} from donor: {} has different tophat aligned GNOS RNA_Seq BAM entries, keep the one in may2016: {}, additional: {}'
.format(bam['aliquot_id'], bam['donor_unique_id'], bam['gnos_metadata_url'], alignment_status.get('tophat').get('aligned_bam').get('gnos_id')))
elif bam['is_mar2016_entry']:
aliquot_tmp = create_aggregated_rna_bam_info(bam)
alignment_status['tophat'] = aliquot_tmp
logger.info( 'Same aliquot: {} from donor: {} has different tophat aligned GNOS RNA_Seq BAM entries, keep the one in mar2016: {}, additional: {}'
.format(bam['aliquot_id'], bam['donor_unique_id'], bam['gnos_metadata_url'], alignment_status.get('tophat').get('aligned_bam').get('gnos_id')))
elif bam['is_oct2015_entry']:
aliquot_tmp = create_aggregated_rna_bam_info(bam)
alignment_status['tophat'] = aliquot_tmp
logger.info( 'Same aliquot: {} from donor: {} has different tophat aligned GNOS RNA_Seq BAM entries, keep the one in oct2015: {}, additional: {}'
.format(bam['aliquot_id'], bam['donor_unique_id'], bam['gnos_metadata_url'], alignment_status.get('tophat').get('aligned_bam').get('gnos_id')))
elif bam['is_s3_transfer_scheduled']:
aliquot_tmp = create_aggregated_rna_bam_info(bam)
alignment_status['tophat'] = aliquot_tmp
logger.info( 'Same aliquot: {} from donor: {} has different tophat aligned GNOS RNA_Seq BAM entries, keep the one scheduled for S3 transfer: {}, additional: {}'
.format(bam['aliquot_id'], bam['donor_unique_id'], bam['gnos_metadata_url'], alignment_status.get('tophat').get('aligned_bam').get('gnos_id')))
elif bam['is_aug2015_entry']:
aliquot_tmp = create_aggregated_rna_bam_info(bam)
alignment_status['tophat'] = aliquot_tmp
logger.info( 'Same aliquot: {} from donor: {} has different tophat aligned GNOS RNA_Seq BAM entries, keep the one in aug2015: {}, additional: {}'
.format(bam['aliquot_id'], bam['donor_unique_id'], bam['gnos_metadata_url'], alignment_status.get('tophat').get('aligned_bam').get('gnos_id')))
elif bam['is_santa_cruz_entry']:
aliquot_tmp = create_aggregated_rna_bam_info(bam)
alignment_status['tophat'] = aliquot_tmp
logger.info( 'Same aliquot: {} from donor: {} has different tophat aligned GNOS RNA_Seq BAM entries, keep the one in santa_cruz: {}, additional: {}'
.format(bam['aliquot_id'], bam['donor_unique_id'], bam['gnos_metadata_url'], alignment_status.get('tophat').get('aligned_bam').get('gnos_id')))
else:
logger.warning( 'Same aliquot: {} from donor: {} using same workflow: {} has different tophat aligned GNOS RNA_Seq BAM entries, in use: {}, additional: {}'
.format(
bam['aliquot_id'],
bam['donor_unique_id'],
bam.get('alignment').get('workflow_name'),
alignment_status.get('tophat').get('aligned_bam').get('gnos_id'),
bam['gnos_metadata_url'])
)
elif 'star' in bam.get('alignment').get('workflow_name').lower():
if not alignment_status.get('star'): # no star workflow for the aliquot
aliquot_tmp = create_aggregated_rna_bam_info(bam)
alignment_status['star'] = aliquot_tmp
elif alignment_status.get('star').get('aligned_bam').get('gnos_id') == bam['bam_gnos_ao_id']:
if bam['gnos_repo'] in alignment_status.get('star').get('aligned_bam').get('gnos_repo'):
logger.warning( 'Same aliquot: {}, same workflow: {}, same GNOS ID: {} in the same GNOS repo: {} more than once. This should never be possible.'
.format(
bam['aliquot_id'],
bam.get('alignment').get('workflow_name'),
alignment_status.get('star').get('aligned_bam').get('gnos_id'),
bam['gnos_repo'])
)
else:
alignment_status.get('star').get('aligned_bam').get('gnos_repo').append(bam['gnos_repo'])
alignment_status.get('star').get('aligned_bam').get('gnos_last_modified').append(bam['last_modified'])
alignment_status.get('star').get('aligned_bam').get('gnos_published_date').append(bam['published_date'])
alignment_status.get('star').get('aligned_bam').get('effective_xml_md5sum').append(bam['effective_xml_md5sum'])
alignment_status.get('star')['exists_xml_md5sum_mismatch'] = False if len(set(alignment_status.get('star').get('aligned_bam').get('effective_xml_md5sum'))) == 1 else True
else:
if bam['is_may2016_entry']:
aliquot_tmp = create_aggregated_rna_bam_info(bam)
alignment_status['star'] = aliquot_tmp
logger.info( 'Same aliquot: {} from donor: {} has different star aligned GNOS RNA_Seq BAM entries, keep the one in may2016: {}, additional: {}'
.format(bam['aliquot_id'], bam['donor_unique_id'], bam['gnos_metadata_url'], alignment_status.get('star').get('aligned_bam').get('gnos_id')))
elif bam['is_mar2016_entry']:
aliquot_tmp = create_aggregated_rna_bam_info(bam)
alignment_status['star'] = aliquot_tmp
logger.info( 'Same aliquot: {} from donor: {} has different star aligned GNOS RNA_Seq BAM entries, keep the one in mar2016: {}, additional: {}'
.format(bam['aliquot_id'], bam['donor_unique_id'], bam['gnos_metadata_url'], alignment_status.get('star').get('aligned_bam').get('gnos_id')))
elif bam['is_oct2015_entry']:
aliquot_tmp = create_aggregated_rna_bam_info(bam)
alignment_status['star'] = aliquot_tmp
logger.info( 'Same aliquot: {} from donor: {} has different star aligned GNOS RNA_Seq BAM entries, keep the one in oct2015: {}, additional: {}'
.format(bam['aliquot_id'], bam['donor_unique_id'], bam['gnos_metadata_url'], alignment_status.get('star').get('aligned_bam').get('gnos_id')))
elif bam['is_s3_transfer_scheduled']:
aliquot_tmp = create_aggregated_rna_bam_info(bam)
alignment_status['star'] = aliquot_tmp
logger.info( 'Same aliquot: {} from donor: {} has different star aligned GNOS RNA_Seq BAM entries, keep the one scheduled for transfer: {}, additional: {}'
.format(bam['aliquot_id'], bam['donor_unique_id'], bam['gnos_metadata_url'], alignment_status.get('star').get('aligned_bam').get('gnos_id')))
elif bam['is_aug2015_entry']:
aliquot_tmp = create_aggregated_rna_bam_info(bam)
alignment_status['star'] = aliquot_tmp
logger.info( 'Same aliquot: {} from donor: {} has different star aligned GNOS RNA_Seq BAM entries, keep the one in aug2015: {}, additional: {}'
.format(bam['aliquot_id'], bam['donor_unique_id'], bam['gnos_metadata_url'], alignment_status.get('star').get('aligned_bam').get('gnos_id')))
elif bam['is_santa_cruz_entry']:
aliquot_tmp = create_aggregated_rna_bam_info(bam)
alignment_status['star'] = aliquot_tmp
logger.info( 'Same aliquot: {} from donor: {} has different star aligned GNOS RNA_Seq BAM entries, keep the one in santa_cruz: {}, additional: {}'
.format(bam['aliquot_id'], bam['donor_unique_id'], bam['gnos_metadata_url'], alignment_status.get('star').get('aligned_bam').get('gnos_id')))
else:
logger.warning( 'Same aliquot: {} from donor: {} using same workflow: {} has different star aligned GNOS RNA_Seq BAM entries, in use: {}, additional: {}'
.format(
bam['aliquot_id'],
bam['donor_unique_id'],
bam.get('alignment').get('workflow_name'),
alignment_status.get('star').get('aligned_bam').get('gnos_id'),
bam['gnos_metadata_url'])
)
else: # other unknown alignment workflows
logger.warning('unknown RNA-Seq alignment workflows: {}'
.format(bam.get('alignment').get('workflow_name') ))
return
aggregated_bam_info_new['RNA-Seq'] = aggregated_bam_info
return aggregated_bam_info_new
def create_aggregated_rna_bam_info(bam):
aliquot_tmp = {
"aliquot_id": bam['aliquot_id'],
"submitter_specimen_id": bam['submitter_specimen_id'],
"submitter_sample_id": bam['submitter_sample_id'],
"icgc_specimen_id": bam['icgc_specimen_id'],
"icgc_sample_id": bam['icgc_sample_id'],
"dcc_specimen_type": bam['dcc_specimen_type'],
"aligned": True,
"is_santa_cruz_entry": bam['is_santa_cruz_entry'],
"is_aug2015_entry": bam['is_aug2015_entry'],
"is_oct2015_entry": bam['is_oct2015_entry'],
"is_mar2016_entry": bam['is_mar2016_entry'],
"is_may2016_entry": bam['is_may2016_entry'],
"is_s3_transfer_scheduled": bam['is_s3_transfer_scheduled'],
"is_s3_transfer_completed": bam['is_s3_transfer_completed'],
"exists_xml_md5sum_mismatch": False,
"aligned_bam": {
"gnos_repo": [bam['gnos_repo']],
"gnos_id": bam['bam_gnos_ao_id'],
"bam_file_name": bam['bam_file_name'],
"bam_file_md5sum": bam['md5sum'],
"bam_file_size": bam['bam_file_size'],
"bai_file_name": bam['bai_file_name'],
"bai_file_md5sum": bam['bai_file_md5sum'],
"bai_file_size": bam['bai_file_size'],
"gnos_last_modified": [bam['last_modified']],
"gnos_published_date": [bam['published_date']],
"effective_xml_md5sum": [bam['effective_xml_md5sum']]
}
}
return aliquot_tmp
def sort_repos_by_time(aggregated_bam_info):
for aliquot in aggregated_bam_info:
agg_bam = aggregated_bam_info.get(aliquot)
if not agg_bam.get('aligned_bam'):
continue
modified_dates = agg_bam.get('aligned_bam').get('gnos_last_modified')
published_dates = agg_bam.get('aligned_bam').get('gnos_published_date')
gnos_repos = agg_bam.get('aligned_bam').get('gnos_repo')
agg_bam.get('aligned_bam')['gnos_last_modified'], agg_bam.get('aligned_bam')['gnos_repo'], agg_bam.get('aligned_bam')['gnos_published_date'] = \
izip(*sorted(izip(modified_dates, gnos_repos, published_dates), key=lambda x: x[0]))
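# Illustration added for clarity (values are made up, not from real metadata): sort_repos_by_time()
# keeps the three parallel lists aligned while ordering them by modification date, e.g.
#   gnos_last_modified  = ['2016-02-01', '2015-12-01']
#   gnos_repo           = ['repo_a', 'repo_b']
#   gnos_published_date = ['2016-02-02', '2015-12-02']
# becomes ('2015-12-01', '2016-02-01'), ('repo_b', 'repo_a'), ('2015-12-02', '2016-02-02').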
def find_latest_metadata_dir(output_dir):
dir_pattern = re.compile(u'^[0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}-[0-9]{2}-[0-9]{2}_[A-Z]{3}$')
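    # dir_pattern matches timestamped metadata directory names such as '2016-05-01_08-30-00_UTC'
    # (example value for illustration only).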
metadata_dirs = []
for dir in os.listdir(output_dir):
if not os.path.isdir(output_dir + '/' + dir):
continue
if dir_pattern.search(dir):
metadata_dirs.append(output_dir + '/' + dir)
return sorted(metadata_dirs)[-1]
def main(argv=None):
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
parser = ArgumentParser(description="PCAWG GNOS Metadata Parser",
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-c", "--config", dest="config",
help="Configuration file for GNOS repositories", required=True)
parser.add_argument("-m", "--metadata_dir", dest="metadata_dir",
help="Directory containing metadata manifest files", required=False)
parser.add_argument("-r", "--gnos_repo", dest="repo",
help="Specify which GNOS repo to process, process all repos if none specified", required=False)
parser.add_argument("-x", "--exclude_gnos_id_lists", dest="exclude_gnos_id_lists", # don't use this option for daily cron job
help="File(s) containing GNOS IDs to be excluded, use filename pattern to specify the file(s)", required=False)
parser.add_argument("-s", "--es_index_suffix", dest="es_index_suffix", # don't use this option for daily cron job
help="Single letter suffix for ES index name", required=False)
args = parser.parse_args()
metadata_dir = args.metadata_dir
conf_file = args.config
repo = args.repo
exclude_gnos_id_lists = args.exclude_gnos_id_lists
es_index_suffix = args.es_index_suffix
if not es_index_suffix: es_index_suffix = ''
with open(conf_file) as f:
conf = yaml.safe_load(f)
for r in conf.get('gnos_repos'):
conf[r.get('base_url')] = r.get('repo_code')
# output_dir
output_dir = conf.get('output_dir')
if metadata_dir:
if not os.path.isdir(metadata_dir): # TODO: should add more directory name check to make sure it's right
sys.exit('Error: specified metadata directory does not exist!')
else:
metadata_dir = find_latest_metadata_dir(output_dir) # sorted(glob.glob(output_dir + '/[0-9]*_*_*[A-Z]'))[-1] # find the directory for latest metadata list
    timestamp = metadata_dir.split('/')[-1]
logger.setLevel(logging.INFO)
ch.setLevel(logging.WARN)
log_file = metadata_dir + '.metadata_parser' + ('' if not repo else '.'+repo) + '.log'
# delete old log first if exists
if os.path.isfile(log_file): os.remove(log_file)
fh = logging.FileHandler(log_file)
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
es_host = 'localhost:9200'
es_index = 'p_' + ('' if not repo else repo+'_') + re.sub(r'\D', '', timestamp).replace('20','',1) + es_index_suffix
es = init_es(es_host, es_index)
logger.info('processing metadata list files in {} to build es index {}'.format(metadata_dir, es_index))
process(metadata_dir, conf, es_index, es, metadata_dir+'/donor_'+es_index+'.jsonl', metadata_dir+'/bam_'+es_index+'.jsonl', repo, exclude_gnos_id_lists)
# now update kibana dashboard
# donor
dashboard_name = ' ['+repo+']' if repo else ''
with open('kibana-donor.json', 'r') as d:
donor_dashboard = json.loads(d.read())
donor_dashboard['index']['default'] = es_index + '/donor'
title = 'PCAWG Donors' + dashboard_name + ' (beta)'
donor_dashboard['title'] = title
body = {
'dashboard': json.dumps(donor_dashboard),
'user': 'guest',
'group': 'guest',
'title': title
}
es.index(index='kibana-int', doc_type='dashboard', id='PCAWG Donors' + dashboard_name, body=body)
# bam search, no need this for now, not very useful
'''
with open('kibana-bam.json', 'r') as d:
bam_dashboard = json.loads(d.read())
bam_dashboard['index']['default'] = es_index + '/bam_file'
title = 'PCAWG BAMs' + dashboard_name + ' (beta)'
bam_dashboard['title'] = title
body = {
'dashboard': json.dumps(bam_dashboard),
'user': 'guest',
'group': 'guest',
'title': title
}
es.index(index='kibana-int', doc_type='dashboard', id='PCAWG BAMs' + dashboard_name, body=body)
'''
return 0
if __name__ == "__main__":
sys.exit(main())
| ICGC-TCGA-PanCancer/pcawg-central-index | pcawg_metadata_parser/parse_gnos_xml.py | Python | gpl-2.0 | 150,932 | ["BWA"] | 6556f9a8c649d0c9b51b2b23d8af80f3b4ab1a0a8f9d54d642080ff097c1dacb |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2017-2021 emijrp <emijrp@gmail.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import random
import re
import sys
import time
import urllib.parse
import pwb
import pywikibot
from wikidatafun import *
"""
#filter by language
SELECT ?item
WHERE {
?item wdt:P31 wd:Q101352 .
FILTER NOT EXISTS { ?item wdt:P31 wd:Q4167410 } .
OPTIONAL { ?item schema:description ?itemDescription. FILTER(LANG(?itemDescription) = "ca"). }
FILTER (!BOUND(?itemDescription))
}
#all surnames
SELECT ?item
WHERE {
?item wdt:P31 wd:Q101352 .
FILTER NOT EXISTS { ?item wdt:P31 wd:Q4167410 } .
}
"""
#family
#genus
#species
#proteins https://query.wikidata.org/bigdata/namespace/wdq/sparql?query=SELECT%20%3FitemDescription%20(COUNT(%3Fitem)%20AS%20%3Fcount)%0AWHERE%20%7B%0A%09%3Fitem%20wdt%3AP279%20wd%3AQ8054.%0A%20%20%20%20%3Fitem%20schema%3Adescription%20%22mammalian%20protein%20found%20in%20Mus%20musculus%22%40en.%0A%20%20%20%20OPTIONAL%20%7B%20%3Fitem%20schema%3Adescription%20%3FitemDescription.%20FILTER(LANG(%3FitemDescription)%20%3D%20%22es%22).%20%20%7D%0A%09FILTER%20(BOUND(%3FitemDescription))%0A%7D%0AGROUP%20BY%20%3FitemDescription%0AORDER%20BY%20DESC(%3Fcount)
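# The URL-encoded query above, decoded here for readability:
# SELECT ?itemDescription (COUNT(?item) AS ?count)
# WHERE {
#     ?item wdt:P279 wd:Q8054 .
#     ?item schema:description "mammalian protein found in Mus musculus"@en .
#     OPTIONAL { ?item schema:description ?itemDescription. FILTER(LANG(?itemDescription) = "es"). }
#     FILTER (BOUND(?itemDescription))
# }
# GROUP BY ?itemDescription
# ORDER BY DESC(?count)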
def genQuery(p31='', desc='', desclang=''):
if not p31 or not desc or not desclang:
print('Error genQuery', p31, desc, desclang)
sys.exit()
query = [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:%s ;
wdt:P31 ?instance .
?item schema:description "%s"@%s.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
""" % (p31, desc, desclang)
]
return query
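# Example with illustrative arguments: genQuery(p31='Q101352', desc='family name', desclang='en')
# returns a single-element list whose query selects items whose only P31 value is Q101352
# (the surname class used in the docstring above) and whose English description is exactly
# "family name", presumably so that matching items can then be given descriptions in other languages.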
def genQueriesByCountry(p31='', desc='', desclang=''):
    # One query per country, replacing the '~' placeholder in the description template
    countries = [
        'Afghanistan', 'Angola', 'Armenia', 'Australia', 'Bangladesh', 'Belarus', 'Belgium', 'Benin',
        'Bolivia', 'Bosnia and Herzegovina', 'Botswana', 'Brazil', 'Brunei', 'Bulgaria', 'Burkina Faso',
        'Canada', 'Chile', 'Colombia', 'Croatia', 'Cuba', 'Cyprus', 'Democratic Republic of the Congo',
        'Equatorial Guinea', 'Ethiopia', 'Fiji', 'Gabon', 'Germany', 'Ghana', 'Guyana', 'India',
        'Indonesia', 'Iran', 'Japan', 'Latvia', 'Lebanon', 'Lithuania', 'Malaysia', 'Mexico',
        'Mozambique', 'New Zealand', 'North Korea', 'Norway', 'Pakistan', "People's Republic of China",
        'Poland', 'Portugal', 'Republic of the Congo', 'Romania', 'Russia', 'Serbia', 'Sierra Leone',
        'Slovakia', 'South Africa', 'South Sudan', 'Spain', 'Sweden', 'Taiwan', 'Turkey',
        'the Central African Republic', 'the Philippines', 'the United Kingdom',
        'United States of America', 'Ukraine', 'Uganda', 'Uruguay', 'Venezuela', 'Vietnam', 'Zambia',
    ]
    queries = {}
    for country in countries:
        countrydesc = desc.replace('~', country)
        queries[countrydesc] = genQuery(p31=p31, desc=countrydesc, desclang=desclang)
    return queries
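# Example with illustrative arguments: genQueriesByCountry(p31='Q4022', desc='river in ~', desclang='en')
# returns a dict with one entry per country, e.g. the key 'river in Afghanistan' mapped to the
# query produced by genQuery(p31='Q4022', desc='river in Afghanistan', desclang='en').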
def genTranslationsByCountryCore(desc='', desclang=''):
translations = {
'bay in ~': {
'en': 'bay in ~',
'es': 'bahía de ~',
},
'bight in ~': {
'en': 'bight in ~',
'es': 'ancón de ~',
},
'cape in ~': {
'en': 'cape in ~',
'es': 'cabo de ~',
},
'cave in ~': {
'en': 'cave in ~',
'es': 'cueva de ~',
},
'dune in ~': {
'en': 'dune in ~',
'es': 'duna de ~',
},
'glacier in ~': {
'en': 'glacier in ~',
'es': 'glaciar de ~',
},
'hill in ~': {
'en': 'hill in ~',
'es': 'colina de ~',
},
'island in ~': {
'en': 'island in ~',
'es': 'isla de ~',
},
'lagoon in ~': {
'en': 'lagoon in ~',
'es': 'laguna de ~',
},
'lake in ~': {
'en': 'lake in ~',
'es': 'lago de ~',
},
'mine in ~': {
'en': 'mine in ~',
'es': 'mina de ~',
},
'mountain in ~': {
'en': 'mountain in ~',
'es': 'montaña de ~',
},
'plain in ~': {
'en': 'plain in ~',
'es': 'llanura de ~',
},
'reef in ~': {
'en': 'reef in ~',
'es': 'arrecife de ~',
},
'reservoir in ~': {
'en': 'reservoir in ~',
'es': 'embalse de ~',
},
'river in ~': {
'en': 'river in ~',
'es': 'río de ~',
},
'road in ~': {
'en': 'road in ~',
'es': 'carretera de ~',
},
'spring in ~': {
'en': 'spring in ~',
'es': 'manantial de ~',
},
'stream in ~': {
'en': 'stream in ~',
'es': 'arroyo de ~',
},
'swamp in ~': {
'en': 'swamp in ~',
'es': 'pantano de ~',
},
'valley in ~': {
'en': 'valley in ~',
'es': 'valle de ~',
},
'watercourse in ~': {
'en': 'watercourse in ~',
'es': 'curso de agua de ~',
},
}
return translations[desc][desclang]
def genTranslationsByCountry(desc=''):
    # English country name -> Spanish country name, in the same order as genQueriesByCountry()
    countries = {
        'Afghanistan': 'Afganistán',
        'Angola': 'Angola',
        'Armenia': 'Armenia',
        'Australia': 'Australia',
        'Bangladesh': 'Bangladesh',
        'Belarus': 'Bielorrusia',
        'Belgium': 'Bélgica',
        'Benin': 'Benín',
        'Bolivia': 'Bolivia',
        'Bosnia and Herzegovina': 'Bosnia y Herzegovina',
        'Botswana': 'Botsuana',
        'Brazil': 'Brasil',
        'Brunei': 'Brunéi',
        'Bulgaria': 'Bulgaria',
        'Burkina Faso': 'Burkina Faso',
        'Canada': 'Canadá',
        'Chile': 'Chile',
        'Colombia': 'Colombia',
        'Croatia': 'Croacia',
        'Cuba': 'Cuba',
        'Cyprus': 'Chipre',
        'Democratic Republic of the Congo': 'República Democrática del Congo',
        'Equatorial Guinea': 'Guinea Ecuatorial',
        'Ethiopia': 'Etiopía',
        'Fiji': 'Fiji',
        'Gabon': 'Gabón',
        'Germany': 'Alemania',
        'Ghana': 'Ghana',
        'Guyana': 'Guyana',
        'India': 'la India',
        'Indonesia': 'Indonesia',
        'Iran': 'Irán',
        'Japan': 'Japón',
        'Latvia': 'Letonia',
        'Lebanon': 'Líbano',
        'Lithuania': 'Lituania',
        'Malaysia': 'Malasia',
        'Mexico': 'México',
        'Mozambique': 'Mozambique',
        'New Zealand': 'Nueva Zelanda',
        'North Korea': 'Corea del Norte',
        'Norway': 'Noruega',
        'Pakistan': 'Pakistán',
        "People's Republic of China": 'la República Popular China',
        'Poland': 'Polonia',
        'Portugal': 'Portugal',
        'Republic of the Congo': 'República del Congo',
        'Romania': 'Rumanía',
        'Russia': 'Rusia',
        'Serbia': 'Serbia',
        'Sierra Leone': 'Sierra Leona',
        'Slovakia': 'Eslovaquia',
        'South Africa': 'Sudáfrica',
        'South Sudan': 'Sudán del Sur',
        'Spain': 'España',
        'Sweden': 'Suecia',
        'Taiwan': 'Taiwán',
        'Turkey': 'Turquía',
        'the Central African Republic': 'República Centroafricana',
        'the Philippines': 'Filipinas',
        'the United Kingdom': 'Reino Unido',
        'United States of America': 'Estados Unidos',
        'Ukraine': 'Ucrania',
        'Uganda': 'Uganda',
        'Uruguay': 'Uruguay',
        'Venezuela': 'Venezuela',
        'Vietnam': 'Vietnam',
        'Zambia': 'Zambia',
    }
    translations = {}
    for country_en, country_es in countries.items():
        translations[desc.replace('~', country_en)] = {
            'en': genTranslationsByCountryCore(desc=desc, desclang='en').replace('~', country_en),
            'es': genTranslationsByCountryCore(desc=desc, desclang='es').replace('~', country_es),
        }
    return translations
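# Example derived from the tables above: genTranslationsByCountry(desc='bay in ~') includes
#   'bay in Afghanistan': {'en': 'bay in Afghanistan', 'es': 'bahía de Afganistán'}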
def main():
fixthiswhenfound = { #fix (overwrite) old, wrong or poor translations
'chemical compound': {
'nl': ['chemische stof'], #https://www.wikidata.org/w/index.php?title=Q27165025&type=revision&diff=486050731&oldid=466952438
},
'family name': {
'sq': ['mbiemri'], #because "mbiemri" = "the family name"
},
'galaxy': {
'es': ['galaxy'],
},
'species of insect': {
'sq': ['specie e insekteve'], #https://github.com/emijrp/wikidata/pull/47
},
'television series': {
'es': ['television series'], #https://www.wikidata.org/w/index.php?title=Q1043980&oldid=837349507
},
'village in China': {
'bn': ['চীনের গ্রাম'], #https://www.wikidata.org/w/index.php?title=User_talk:Emijrp&diff=prev&oldid=510797889
'fi': ['kiinalainen kylä'], #https://www.wikidata.org/w/index.php?title=User_talk:Emijrp&diff=468197059&oldid=463649230
'id': ['desa di Cina'],
},
'Wikimedia category': {
'arz': ['ويكيبيديا:تصنيف'],
'be': ['катэгарызацыя'], #https://www.wikidata.org/w/index.php?title=User:Emijrp/Wikimedia_project_pages_matrix&diff=next&oldid=500158307
'be-tarask': ['Катэгорыя', 'Вікіпэдыя:Катэгорыя'],#https://www.wikidata.org/w/index.php?title=User:Emijrp/Wikimedia_project_pages_matrix&diff=next&oldid=500158307
'es': ['categoría de Wikipedia'],
'mk': ['категорија на Википедија'],
'uk': ['Категорії', 'категорія в проекті Вікімедіа'], #https://www.wikidata.org/w/index.php?title=User_talk%3AEmijrp&type=revision&diff=527336622&oldid=525302741
},
'Wikimedia disambiguation page': {
'bn': ['উইকিমিডিয়া দ্ব্যর্থতা নিরসন পাতা'],
'el': ['σελίδα αποσαφήνισης'],#https://www.wikidata.org/w/index.php?title=Q29449981&diff=prev&oldid=567203989
'es': ['desambiguación de Wikipedia'],
'fy': ['Betsjuttingsside'], #https://www.wikidata.org/w/index.php?title=User:Emijrp/Wikimedia_project_pages_matrix&curid=30597789&diff=499110338&oldid=498167178
'id': ['halaman disambiguasi'],
'tg': ['саҳифаи ибҳомзудоии Викимаълумот'], #https://www.wikidata.org/w/index.php?title=Topic:Ts4qkooukddjcuq9&topic_showPostId=ts4rax4ro9brqqgj#flow-post-ts4rax4ro9brqqgj
'tt': ['Википедия:Күп мәгънәле мәкаләләр'],
'uk': ['сторінка значень в проекті Вікімедіа'],
},
'Wikimedia list article': {
'et': ['Vikipeedia:Loend', 'Vikipeedia loend'], #https://www.wikidata.org/w/index.php?title=Q13406463&diff=next&oldid=588159017
'id': ['Wikipedia:Daftar'], #https://www.wikidata.org/w/index.php?title=Q13406463&diff=745905758&oldid=745029653
'tg': ['саҳифае, ки аз рӯйхат иборат аст'], #https://www.wikidata.org/w/index.php?title=Q13406463&diff=prev&oldid=498154491
'uk': ['сторінка-список в проекті Вікімедіа'], #https://www.wikidata.org/w/index.php?title=Q13406463&diff=617531932&oldid=606446211
},
'Wikimedia template': {
'be': ['шаблон Вікіпедыя'],
'be-tarask': ['шаблён Вікіпэдыя'],
'eu': ['Wikimediarako txantiloia'], #https://www.wikidata.org/w/index.php?title=Q11266439&type=revision&diff=469566880&oldid=469541605
'id': ['templat Wikipedia'],
'nb': ['Wikipedia mal'],
'nn': ['Wikipedia mal'],
'pl': ['szablon Wikipedii'],
'sq': ['Wikipedia stampa'],
'sv': ['Wikipedia mall'],
'sw': ['kigezo Wikipedia'],
'tg': ['Шаблони Викимедиа', 'шаблон Википедиа'], #https://www.wikidata.org/w/index.php?title=Q11266439&diff=prev&oldid=498153879
'tr': ['şablon Vikipedi'],
'uk': ['шаблон Вікіпедії', 'шаблон проекту Вікімедіа'],
'yo': ['àdàkọ Wikipedia'],
'vi': ['bản mẫu Wikipedia'],
},
'Wikinews article': {
'nl': ['Wikinews-artikel'],
},
}
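    # Illustrative reading of the mapping above: an existing Spanish description 'galaxy'
    # is treated as wrong and gets overwritten with the correct translation ('galaxia').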
translations = {
'asteroid': { #Q3863
'af': 'asteroïde',
'an': 'asteroide',
'ar': 'كويكب',
'as': 'গ্ৰহাণু',
'ast': 'asteroide',
'az': 'asteroid',
'azb': 'گزگنچه',
'ba': 'Астероид',
'bar': 'Asteroid',
'be': 'астэроід',
'be-tarask': 'астэроід',
'bg': 'астероид',
'bho': 'एस्टेरॉइड्स',
'bjn': 'asteruid',
'bn': 'গ্রহাণু',
'br': 'asteroidenn',
'bs': 'Asteroid',
'ca': 'asteroide',
'ce': 'Астероид',
'chr': 'ᏅᏯ ᏧᎳᎬᎭᎸᏓ',
'ckb': 'گەڕەستێرۆچکە',
'cs': 'asteroid',
'cv': 'астероид',
'cy': 'asteroid',
'da': 'asteroide',
'de': 'Asteroid',
'el': 'αστεροειδής',
'eml': 'Asteròid',
'en': 'asteroid',
'en-ca': 'asteroid',
'en-gb': 'asteroid',
'eo': 'asteroido',
'es': 'asteroide',
'et': 'asteroid',
'eu': 'asteroide',
'fa': 'سیارک',
'fi': 'asteroidi',
'fr': 'astéroïde',
'fy': 'asteroïde',
'ga': 'astaróideach',
'gd': 'astaroid',
'gl': 'asteroide',
'gn': 'Mbyjaveve',
'gsw': 'Asteroid',
'gv': 'roltageagh',
'he': 'אסטרואיד',
'hi': 'क्षुद्रग्रह',
'hif': 'Chhota tara',
'hr': 'asteroidi',
'hsb': 'Asteroid',
'ht': 'astewoyid',
'hu': 'kisbolygó',
'hy': 'աստերոիդ',
'ia': 'asteroide',
'id': 'asteroid',
'ilo': 'asteroid',
'io': 'asteroido',
'is': 'Smástirni',
'it': 'asteroide',
'ja': '小惑星',
'jam': 'Astaraid',
'jbo': 'cmaplini',
'jv': 'asteroid',
'ka': 'მცირე ცთომილები',
'kaa': 'Asteroid',
'kab': 'Azungur',
'kk': 'астероид',
'ko': '소행성',
'krc': 'Астероид',
'ksh': 'Asteroid',
'ku': 'asteroîd',
'ky': 'астероид',
'la': 'asteroides',
'lb': 'Asteroid',
'lez': 'астероид',
'lmo': 'Asteroide',
'lo': 'ດາວເຄາະນ້ອຍ',
'lt': 'Asteroidas',
'lv': 'asteroīds',
'lzh': '小行星',
'min': 'asteroid',
'mk': 'астероид',
'ml': 'ഛിന്നഗ്രഹം',
'mr': 'लघुग्रह',
'ms': 'asteroid',
'mt': 'asterojde',
'mwl': 'asteróide',
'my': 'ဥက္ကာပျံ',
'nan': 'sió-he̍k-chheⁿ',
'nap': 'asteroide',
'nb': 'asteroide',
'nds': 'Asteroid',
'nl': 'asteroïde',
'nn': 'asteroide',
'oc': 'asteroïde',
'or': 'ଗ୍ରହାଣୁ',
'os': 'Астероид',
'pa': 'ਨਿੱਕਾ ਗ੍ਰਹਿ',
'pam': 'asteroid',
'pms': 'Asteròid',
'pnb': 'تارے ورگا',
'pt': 'asteroide',
'pt-br': 'asteroide',
'ro': 'Asteroid',
'ru': 'астероид',
'rue': 'астероід',
'sah': 'Астероид',
'scn': 'astiròidi',
'sco': 'asteroid',
'sgs': 'Asteruoids',
'sh': 'asteroid',
'si': 'ග්රහක',
'sk': 'asteroid',
'sl': 'asteroid',
'sq': 'asteroid',
'sr': 'астероид',
'su': 'Astéroid',
'sv': 'asteroid',
'sw': 'asteroidi',
'ta': 'சிறுகோள்',
'tg': 'Сайёрак',
'th': 'ดาวเคราะห์น้อย',
'tk': 'Asteroid',
'tl': 'asteroyd',
'tr': 'Asteroit',
'tt': 'астероид',
'tt-cyrl': 'астероид',
'tyv': 'Астероид',
'uk': 'астероїд',
'ur': 'نجمانی',
'vi': 'tiểu hành tinh',
'vls': 'Asteroïde',
'war': 'Asteroyd',
'wuu': '小行星',
'xmf': 'ასტეროიდი',
'yi': 'אסטערויד',
'yue': '小行星',
'zh': '小行星',
'zh-cn': '小行星',
'zh-hans': '小行星',
'zh-hant': '小行星',
'zh-hk': '小行星',
'zh-mo': '小行星',
'zh-my': '小行星',
'zh-sg': '小行星',
},
'chemical compound': { #Q11173
'af': 'chemiese verbinding',
'an': 'compuesto quimico',
'ar': 'مركب كيميائي',
'ast': 'compuestu químicu',
'be': 'хімічнае злучэнне',
'be-tarask': 'хімічнае злучэньне',
'bg': 'химично съединение',
'bn': 'রাসায়নিক যৌগ',
'ca': 'compost químic',
'cs': 'chemická sloučenina',
'cy': 'cyfansoddyn cemegol',
'da': 'kemisk forbindelse',
'de': 'chemische Verbindung',
'de-ch': 'chemische Verbindung',
'el': 'χημική ένωση',
'en': 'chemical compound',
'en-ca': 'chemical compound',
'en-gb': 'chemical compound',
'eo': 'kemia kombinaĵo',
'es': 'compuesto químico',
'et': 'keemiline ühend',
'eu': 'konposatu kimiko',
'fr': 'composé chimique',
'fy': 'gemyske ferbining',
'gl': 'composto químico',
'he': 'תרכובת',
'hy': 'քիմիական միացություն',
'ia': 'composito chimic',
'id': 'senyawa kimia',
'io': 'kemiala kompozajo',
'it': 'composto chimico',
'la': 'compositum chemicum',
'lb': 'chemesch Verbindung',
'lv': 'ķīmisks savienojums',
'mk': 'хемиско соединение',
'nb': 'kjemisk forbindelse',
'nl': 'chemische verbinding',
'nn': 'kjemisk sambinding',
'oc': 'component quimic',
'pl': 'związek chemiczny',
'pt': 'composto químico',
'pt-br': 'composto químico',
'ro': 'compus chimic',
'ru': 'химическое соединение',
'scn': 'cumpostu chìmicu',
'sk': 'chemická zlúčenina',
'sq': 'komponim kimik',
'uk': 'хімічна сполука',
'yue': '化合物',
'zh': '化合物',
'zh-cn': '化合物',
'zh-hans': '化合物',
'zh-hant': '化合物',
'zh-hk': '化合物',
'zh-mo': '化合物',
'zh-sg': '化合物',
'zh-tw': '化合物',
},
'douar in Morocco': { #Q23925393
'de': 'douar in Marokko',
'en': 'douar in Morocco',
'es': 'douar de Marruecos',
'fr': 'douar marocain',
'nl': 'douar in Marokko',
},
'encyclopedic article': { #Q17329259
'ar': 'مقالة موسوعية',
'ast': 'artículu enciclopédicu',
'be': 'энцыклапедычны артыкул',
'bn': 'বিশ্বকোষীয় নিবন্ধ',
'ca': 'article enciclopèdic',
'cs': 'encyklopedický článek',
'da': 'encyklopædiartikel',
'de': 'enzyklopädischer Artikel',
'el': 'λήμμα εγκυκλοπαίδειας',
'en': 'encyclopedic article',
'eo': 'enciklopedia artikolo',
'es': 'artículo de enciclopedia',
'et': 'entsüklopeedia artikkel',
'eu': 'entziklopedia artikulu',
'fi': 'tietosanakirja-artikkeli',
'fr': "article d'encyclopédie",
'frc': "article d'encyclopédie",
'fy': 'ensyklopedysk artikel',
'gl': 'artigo enciclopédico',
'he': 'ערך אנציקלופדי',
'hy': 'հանրագիտարանային հոդված',
'hu': 'enciklopédia-szócikk',
'id': 'artikel ensiklopedia',
'io': 'enciklopediala artiklo',
'it': 'voce enciclopedica',
'ja': '百科事典の記事',
'ka': 'ენციკლოპედიური სტატია',
'lt': 'enciklopedinis straipsnis',
'lv': 'enciklopēdisks raksts',
'mk': 'енциклопедиска статија',
'nb': 'encyklopedisk artikkel',
'nl': 'encyclopedisch artikel',
'nn': 'ensyklopedisk artikkel',
'pl': 'artykuł w encyklopedii',
'pt-br': 'artigo enciclopédico',
'ro': 'articol enciclopedic',
'ru': 'энциклопедическая статья',
'sl': 'enciklopedični članek',
'sq': 'artikull enciklopedik',
'sr': 'енциклопедијски чланак',
'sr-ec': 'енциклопедијски чланак',
'sr-el': 'enciklopedijski članak',
'sv': 'encyklopedisk artikel',
'tg': 'мақолаи энсиклопедӣ',
'tg-cyrl': 'мақолаи энсиклопедӣ',
'tt': 'энциклопедик мәкалә',
'tt-cyrl': 'энциклопедик мәкалә',
'uk': 'енциклопедична стаття',
'zh': '条目',
'zh-hans': '百科全书条目',
},
'entry in Dictionary of National Biography': {
'en': 'entry in Dictionary of National Biography',
'es': 'entrada del Dictionary of National Biography',
},
'extrasolar planet': {
'af': 'eksoplaneet',
'ast': 'planeta estrasolar',
'az': 'ekzoplanet',
'be': 'Экзапланета',
'be-tarask': 'Экзаплянэта',
'bg': 'Екзопланета',
'bn': 'বহির্গ্রহ',
'br': 'Ezplanedenn',
'bs': 'vansolarna planeta',
'ca': 'planeta extrasolar',
'cs': 'exoplaneta',
'cv': 'Экзопланета',
'de': 'extrasolarer Planet',
'en': 'extrasolar planet',
'en-ca': 'extrasolar planet',
'en-gb': 'extrasolar planet',
'es': 'planeta extrasolar',
'fi': 'eksoplaneetta',
'fr': 'exoplanète',
'gl': 'planeta extrasolar',
'it': 'pianeta extrasolare',
'nb': 'eksoplanet',
'nl': 'exoplaneet',
'pt': 'exoplaneta',
'pt-br': 'exoplaneta',
'wuu': '太阳系外行星',
'yue': '太陽系外行星',
'zh': '太陽系外行星',
},
#more families https://query.wikidata.org/#SELECT %3FitemDescription (COUNT(%3Fitem) AS %3Fcount)%0AWHERE {%0A%09%3Fitem wdt%3AP31 wd%3AQ16521.%0A %3Fitem wdt%3AP105 wd%3AQ35409.%0A %23%3Fitem schema%3Adescription "family of insects"%40en.%0A OPTIONAL { %3Fitem schema%3Adescription %3FitemDescription. FILTER(LANG(%3FitemDescription) %3D "en"). }%0A%09FILTER (BOUND(%3FitemDescription))%0A}%0AGROUP BY %3FitemDescription%0AORDER BY DESC(%3Fcount)
'family of crustaceans': {
'en': 'family of crustaceans',
'es': 'familia de crustáceos',
'et': 'koorikloomade sugukond',
'he': 'משפחה של סרטנאים',
'io': 'familio di krustacei',
'ro': 'familie de crustacee',
},
'family of insects': {
'bn': 'কীটপতঙ্গের পরিবার',
'en': 'family of insects',
'es': 'familia de insectos',
'et': 'putukate sugukond',
'fr': 'famille d\'insectes',
'he': 'משפחה של חרקים',
'io': 'familio di insekti',
'ro': 'familie de insecte',
},
'family of molluscs': {
'bn': 'মলাস্কার পরিবার',
'en': 'family of molluscs',
'es': 'familia de moluscos',
'et': 'limuste sugukond',
'he': 'משפחה של רכיכות',
'io': 'familio di moluski',
'ro': 'familie de moluște',
},
'family of plants': {
'bn': 'উদ্ভিদের পরিবার',
'cy': 'teulu o blanhigion',
'en': 'family of plants',
'es': 'familia de plantas',
'et': 'taimesugukond',
'he': 'משפחה של צמחים',
'io': 'familio di planti',
'ro': 'familie de plante',
},
'galaxy': {
'ast': 'galaxa',
'ca': 'galàxia',
            'en': 'galaxy',
'eo': 'galaksio',
'es': 'galaxia',
'fr': 'galaxie',
'gl': 'galaxia',
'pt': 'galáxia',
},
'genus of algae': {
'ar': 'جنس من الطحالب',
'bn': 'শৈবালের গণ',
'en': 'genus of algae',
'es': 'género de algas',
'et': 'vetikaperekond',
'gl': 'xénero de algas',
'he': 'סוג של אצה',
'id': 'genus alga',
'io': 'genero di algi',
'nb': 'algeslekt',
'nn': 'algeslekt',
'ro': 'gen de alge',
'sq': 'gjini e algave',
},
'genus of amphibians': {
'ar': 'جنس من البرمائيات',
'bn': 'উভচর প্রাণীর গণ',
'en': 'genus of amphibians',
'es': 'género de anfibios',
'et': 'kahepaiksete perekond',
'fr': "genre d'amphibiens",
'he': 'סוג של דו־חיים',
'id': 'genus amfibi',
'io': 'genero di amfibii',
'it': 'genere di anfibi',
'nb': 'amfibieslekt',
'nn': 'amfibieslekt',
'ro': 'gen de amfibieni',
'ru': 'род амфибий',
'sq': 'gjini e amfibeve',
},
'genus of arachnids': {
'ar': 'جنس من العنكبوتيات',
'bn': 'অ্যারাকনিডের গণ',
'ca': "gènere d'aràcnids",
'en': 'genus of arachnids',
'es': 'género de arañas',
'et': 'ämblikulaadsete perekond',
'fr': "genre d'araignées",
'he': 'סוג של עכביש',
'id': 'genus arachnida',
'io': 'genero di aranei',
'it': 'genere di ragni',
'nb': 'edderkoppslekt',
'nn': 'edderkoppslekt',
'ro': 'gen de arahnide',
},
'genus of birds': {
'ar': 'جنس من الطيور',
'bn': 'পাখির গণ',
'ca': "gènere d'ocells",
'cy': 'genws o adar',
'en': 'genus of birds',
'es': 'género de aves',
'et': 'linnuperekond',
'fr': "genre d'oiseaux",
'gl': 'xénero de aves',
'he': 'סוג של ציפור',
'id': 'genus burung',
'io': 'genero di uceli',
'it': 'genere di uccelli',
'ro': 'gen de păsări',
'sq': 'gjini e zogjve',
},
'genus of fishes': {
'ar': 'جنس من الأسماك',
'bn': 'মাছের গণ',
'en': 'genus of fishes',
'es': 'género de peces',
'et': 'kalade perekond',
'fr': 'genre de poissons',
'he': 'סוג של דג',
'id': 'genus ikan',
'io': 'genero di fishi',
'it': 'genere di pesci',
'nb': 'fiskeslekt',
'nn': 'fiskeslekt',
'pt': 'género de peixes',
'pt-br': 'gênero de peixes',
'ro': 'gen de pești',
'sq': 'gjini e peshqëve',
},
'genus of fungi': {
'ar': 'جنس من الفطريات',
'bn': 'ছত্রাকের গণ',
'en': 'genus of fungi',
'es': 'género de hongos',
'et': 'seente perekond',
'fr': 'genre de champignons',
'gl': 'xénero de fungos',
'he': 'סוג של פטריה',
'id': 'genus fungi',
'io': 'genero di fungi',
'it': 'genere di funghi',
'nb': 'soppslekt',
'nn': 'soppslekt',
'pt': 'género de fungos',
'pt-br': 'gênero de fungos',
# 'ro': 'gen de fungi',# or 'gen de ciuperci'
'sq': 'gjini e kërpudhave',
},
'genus of insects': {
'ar': 'جنس من الحشرات',
'bn': 'কীটপতঙ্গের গণ',
'ca': "gènere d'insectes",
'en': 'genus of insects',
'es': 'género de insectos',
'et': 'putukate perekond',
'fr': "genre d'insectes",
'he': 'סוג של חרק',
'id': 'genus serangga',
'io': 'genero di insekti',
'it': 'genere di insetti',
'nb': 'insektslekt',
'nn': 'insektslekt',
'pt': 'género de insetos',
'pt-br': 'gênero de insetos',
'ro': 'gen de insecte',
'ru': 'род насекомых',
'sq': 'gjini e insekteve',
},
'genus of mammals': {
'ar': 'جنس من الثدييات',
'bn': 'স্তন্যপায়ীর গণ',
'ca': 'gènere de mamífers',
'en': 'genus of mammals',
'es': 'género de mamíferos',
'et': 'imetajate perekond',
'fr': 'genre de mammifères',
'gl': 'xénero de mamíferos',
'he': 'סוג של יונק',
'id': 'genus mamalia',
'io': 'genero di mamiferi',
'nb': 'pattedyrslekt',
'nn': 'pattedyrslekt',
'ro': 'gen de mamifere',
'sq': 'gjini e gjitarëve',
},
'genus of molluscs': {
'ar': 'جنس من الرخويات',
'bn': 'মলাস্কার গণ',
'ca': 'gènere de mol·luscs',
'en': 'genus of molluscs',
'es': 'género de moluscos',
'et': 'limuste perekond',
'fr': 'genre de mollusques',
'gl': 'xénero de moluscos',
'he': 'סוג של רכיכה',
'id': 'genus moluska',
'io': 'genero di moluski',
'it': 'genere di molluschi',
'nb': 'bløtdyrslekt',
'nn': 'blautdyrslekt',
'ro': 'gen de moluște',
'sq': 'gjini e molusqeve',
},
'genus of plants': {
'ar': 'جنس من النباتات',
'ca': 'gènere de plantes',
'bn': 'উদ্ভিদের গণ',
'cy': 'genws o blanhigion',
'en': 'genus of plants',
'es': 'género de plantas',
'et': 'taimeperekond',
'fr': 'genre de plantes',
'gl': 'xénero de plantas',
'he': 'סוג של צמח',
'id': 'genus tumbuh-tumbuhan',
'io': 'genero di planti',
'nb': 'planteslekt',
'nn': 'planteslekt',
'pt': 'género de plantas',
'pt-br': 'gênero de plantas',
'ro': 'gen de plante',
'sq': 'gjini e bimëve',
},
'genus of reptiles': {
'ar': 'جنس من الزواحف',
'bn': 'সরীসৃপের গণ',
'ca': 'gènere de rèptils',
'en': 'genus of reptiles',
'es': 'género de reptiles',
'et': 'roomajate perekond',
'fr': 'genre de reptiles',
'he': 'סוג של זוחל',
'id': 'genus reptilia',
'io': 'genero di repteri',
'nb': 'krypdyrslekt',
'nn': 'krypdyrslekt',
'ro': 'gen de reptile',
'sq': 'e zvarranikëve',
},
'family name': {
'an': 'apelliu',
'ar': 'اسم العائلة',
'ast': 'apellíu',
'az': 'Soyad',
'bar': 'Schreibnam',
'be': 'прозвішча',
'bg': 'презиме',
'bn': 'পারিবারিক নাম',
'bs': 'prezime',
'ca': 'cognom',
'cs': 'příjmení',
'cy': 'cyfenw',
'da': 'efternavn',
'de': 'Familienname',
'de-at': 'Familienname',
'de-ch': 'Familienname',
'el': 'επώνυμο',
'en': 'family name',
'eo': 'familia nomo',
'es': 'apellido',
'et': 'perekonnanimi',
'eu': 'abizen',
'fa': 'نام خانوادگی',
'fi': 'sukunimi',
'fo': 'ættarnavn',
'fr': 'nom de famille',
'gl': 'apelido',
'gsw': 'Familiename',
'gu': 'અટક',
'he': 'שם משפחה',
'hr': 'prezime',
'hu': 'vezetéknév',
'hy': 'ազգանուն',
'id': 'nama keluarga',
'is': 'eftirnafn',
'it': 'cognome',
'ja': '姓',
'ka': 'გვარი',
'ko': '성씨',
'lb': 'Familljennumm',
'lt': 'pavardė',
'lv': 'uzvārds',
'min': 'namo asli',
'mk': 'презиме',
'nb': 'etternavn',
'nds': 'Familiennaam',
'nl': 'achternaam',
'nn': 'etternamn',
'pl': 'nazwisko',
'pt': 'sobrenome',
'pt-br': 'nome de família',
'ro': 'nume de familie',
'ru': 'фамилия',
'se': 'goargu',
'sh': 'prezime',
'sje': 'maŋŋepnamma',
'sk': 'priezvisko',
'sl': 'priimek',
'sma': 'fuelhkienomme',
'smj': 'maŋepnamma',
'sq': 'mbiemër',
'sr': 'презиме',
'sv': 'efternamn',
'tl': 'apelyido',
'tr': 'soyadı',
'uk': 'прізвище',
'zh': '姓氏',
'zh-cn': '姓氏',
'zh-hans': '姓氏',
'zh-hant': '姓氏',
'zh-hk': '姓氏',
'zh-mo': '姓氏',
'zh-my': '姓氏',
'zh-sg': '姓氏',
'zh-tw': '姓氏',
'zu': 'isibongo',
},
'female given name': {
'af': 'vroulike voornaam',
            'ar': 'اسم شخصي مؤنث',
'ast': 'nome femenín',
'bar': 'Weiwanam',
'be': 'жаночае асабістае імя',
'bn': 'প্রদত্ত মহিলা নাম',
'br': 'anv merc’hed',
'bs': 'žensko ime',
'ca': 'prenom femení',
'ce': 'зудчун шен цӀе',
'cs': 'ženské křestní jméno',
'cy': 'enw personol benywaidd',
'da': 'pigenavn',
'de': 'weiblicher Vorname',
'de-at': 'weiblicher Vorname',
'de-ch': 'weiblicher Vorname',
'el': 'γυναικείο όνομα',
'en': 'female given name',
'en-ca': 'female given name',
'en-gb': 'female given name',
'eo': 'virina persona nomo',
'es': 'nombre femenino',
'et': 'naisenimi',
'fa': 'نامهای زنانه',
'fi': 'naisen etunimi',
'fr': 'prénom féminin',
'fy': 'froulike foarnamme',
'he': 'שם פרטי של אישה',
'hr': 'žensko ime',
'hsb': 'žonjace předmjeno',
'hu': 'női keresztnév',
'hy': 'իգական անձնանուն',
'id': 'nama depan wanita',
'it': 'prenome femminile',
'ja': '女性の名前',
'ko': '여성의 이름',
'la': 'praenomen femininum',
'lb': 'weibleche Virnumm',
'lt': 'moteriškas vardas',
'lv': 'sieviešu personvārds',
'mk': 'женско лично име',
'nb': 'kvinnenavn',
'ne': 'स्त्रीलिङ्गी नाम',
'nl': 'vrouwelijke voornaam',
'nn': 'kvinnenamn',
'pl': 'imię żeńskie',
'pt': 'nome próprio feminino',
'pt-br': 'nome próprio feminino',
'ro': 'prenume feminin',
'ru': 'женское личное имя',
'sr': 'женско лично име',
'sr-ec': 'женско лично име',
'scn': 'nomu di battìu fimmininu',
'sco': 'female gien name',
'sk': 'ženské krstné meno',
'sl': 'žensko osebno ime',
'sq': 'emër femëror',
'sr-el': 'žensko lično ime',
'sv': 'kvinnonamn',
'tr': 'kadın ismidir',
'uk': 'жіноче особове ім’я',
'yue': '女性人名',
'zh': '女性人名',
'zh-cn': '女性人名 ',
'zh-hans': '女性人名',
'zh-hant': '女性人名',
'zh-hk': '女性人名',
'zh-mo': '女性人名',
'zh-my': '女性人名',
'zh-sg': '女性人名',
'zh-tw': '女性人名'
},
'Hebrew calendar year': {
'ar': 'سنة في التقويم العبري',
'bn': 'হিব্রু পঞ্জিকার বছর',
'ca': 'any de calendari hebreu',
'en': 'Hebrew calendar year',
'es': 'año del calendario hebreo',
'fa': 'سال در گاهشماری عبری',
'fr': 'année hébraïque',
'he': 'שנה עברית',
'hy': 'Հրեական օրացույցի տարեթիվ',
'id': 'tahun kalendar Ibrani',
'nb': 'hebraisk kalenderår',
'nn': 'hebraisk kalenderår',
'ru': 'год еврейского календаря',
'sq': 'vit i kalendarik hebraik',
},
'Islamic calendar year': {
'ar': 'سنة في التقويم الإسلامي',
'bn': 'ইসলামী পঞ্জিকার বছর',
'en': 'Islamic calendar year',
'es': 'año del calendario musulmán',
'he': 'שנה בלוח השנה המוסלמי',
'id': 'tahun kalendar Islam',
'nb': 'islamsk kalenderår',
'nn': 'islamsk kalenderår',
'sq': 'vit i kalendarik islamik',
},
'male given name': {
'af': 'manlike voornaam',
'ar': 'اسم شخصي مذكر',
'ast': 'nome masculín',
'bar': 'Mannanam',
'be': 'мужчынскае асабістае імя',
'be-tarask': 'мужчынскае асабістае імя',
'bn': 'প্রদত্ত পুরুষ নাম',
'br': 'anv paotr',
'bs': 'muško ime',
'ca': 'prenom masculí',
'ce': 'стеган шен цӀе',
'cs': 'mužské křestní jméno',
'cy': 'enw personol gwrywaidd',
'da': 'drengenavn',
'de': 'männlicher Vorname',
'de-at': 'männlicher Vorname',
'de-ch': 'männlicher Vorname',
'el': 'ανδρικό όνομα',
'en': 'male given name',
'en-ca': 'male given name',
'en-gb': 'male given name',
'eo': 'vira persona nomo',
'es': 'nombre masculino',
'et': 'mehenimi',
'eu': 'gizonezko izena',
'fa': 'نام کوچک مردانه',
'fi': 'miehen etunimi',
'fr': 'prénom masculin',
'fy': 'manlike foarnamme',
'gl': 'nome masculino',
'gsw': 'männlige Vorname',
'he': 'שם פרטי של גבר',
'hr': 'muško ime',
'hu': 'férfi keresztnév',
'hy': 'արական անձնանուն',
'id': 'nama pemberian maskulin',
'is': 'mannsnafn',
'it': 'prenome maschile',
'ja': '男性の名前',
'ko': '남성의 이름',
'la': 'praenomen masculinum',
'lb': 'männleche Virnumm',
'lt': 'vyriškas vardas',
'lv': 'vīriešu personvārds',
'mk': 'машко лично име',
'nb': 'mannsnavn',
'ne': 'पुलिङ्गी नाम',
'nl': 'mannelijke voornaam',
'nn': 'mannsnamn',
'pl': 'imię męskie',
'pt': 'nome próprio masculino',
'pt-br': 'nome próprio masculino',
'ro': 'prenume masculin',
'ru': 'мужское личное имя',
'scn': 'nomu di battìu masculinu',
'sco': 'male first name',
'sk': 'mužské meno',
'sl': 'moško osebno ime',
'sq': 'emër mashkullor',
'sr': 'мушко лично име',
'sr-el': 'muško lično ime',
'sr-ec': 'мушко лично име',
'sv': 'mansnamn',
'tr': 'erkek ismidir',
'uk': 'чоловіче особове ім’я',
'yue': '男性人名',
'zh': '男性人名',
'zh-cn': '男性人名',
'zh-hans': '男性人名',
'zh-hant': '男性人名',
'zh-hk': '男性人名',
'zh-mo': '男性人名',
'zh-my': '男性人名',
'zh-sg': '男性人名',
'zh-tw': '男性人名'
},
'natural number': {
'af': 'natuurlike getal',
'als': 'natürlige Zahle',
'an': 'numero natural',
'ar': 'عدد طبيعي',
'bn': 'প্রাকৃতিক সংখ্যা',
'ca': 'nombre natural',
'cy': 'rhif naturiol',
'en': 'natural number',
'en-ca': 'natural number',
'en-gb': 'natural number',
'eo': 'natura nombro',
'es': 'número natural',
'et': 'naturaalarv',
'he': 'מספר טבעי',
'hi': 'प्राकृतिक संख्या',
'hy': 'Բնական թիվ',
'ia': 'numero natural',
'id': 'angka alami',
'io': 'naturala nombro',
'ka': 'ნატურალური რიცხვი',
'kn': 'ಸ್ವಾಭಾವಿಕ ಸಂಖ್ಯೆ',
'it': 'numero naturale',
'la': 'numerus naturalis',
'mwl': 'númaro natural',
'nb': 'naturlig tall',
'nn': 'naturleg tal',
'pms': 'nùmer natural',
'pt': 'número natural',
'ro': 'număr natural',
'scn': 'nùmmuru naturali',
'sco': 'naitural nummer',
'sc': 'nùmeru naturale',
'szl': 'naturalno nůmera',
'ru': 'натуральное число',
'sq': 'numër natyror',
'uk': 'натуральне число',
},
'species of alga': {
'bn': 'শৈবালের প্রজাতি',
'en': 'species of alga',
'es': 'especie de alga',
'et': 'vetikaliik',
'gl': 'especie de alga',
'he': 'מין של אצה',
'io': 'speco di algo',
'ro': 'specie de alge',
'sq': 'lloj i algave',
},
'species of amphibian': {
'bn': 'উভচর প্রাণীর প্রজাতি',
'ca': 'espècie d\'amfibi',
'en': 'species of amphibian',
'es': 'especie de anfibio',
'et': 'kahepaiksete liik',
'fr': 'espèce d\'amphibiens',
'io': 'speco di amfibio',
#'it': 'specie di anfibio', or anfibi?
'pt': 'espécie de anfíbio',
'he': 'מין של דו-חיים',
'ro': 'specie de amfibieni',
'sq': 'lloj i amfibeve',
},
'species of arachnid': {
'bn': 'অ্যারাকনিডের প্রজাতি',
'ca': 'espècie d\'aràcnid',
'en': 'species of arachnid',
'es': 'especie de arácnido',
'et': 'ämblikulaadsete liik',
'fr': 'espèce d\'araignées',
'io': 'speco di araneo',
'it': 'specie di ragno',
'pt': 'espécie de aracnídeo',
'he': 'מין של עכביש',
'ro': 'specie de arahnide',
},
'species of insect': { #the DE and FR descriptions are more precise and would be preferable
#decide what to do
# https://query.wikidata.org/#SELECT%20%3FitemDescription%20%28COUNT%28%3Fitem%29%20AS%20%3Fcount%29%0AWHERE%0A%7B%0A%09%3Fitem%20wdt%3AP31%20wd%3AQ16521.%0A%20%20%20%20%3Fitem%20wdt%3AP105%20wd%3AQ7432.%0A%20%20%20%20%3Fitem%20schema%3Adescription%20%22species%20of%20insect%22%40en.%0A%20%20%20%20OPTIONAL%20%7B%20%3Fitem%20schema%3Adescription%20%3FitemDescription.%20FILTER%28LANG%28%3FitemDescription%29%20%3D%20%22de%22%29.%20%20%7D%0A%09FILTER%20%28BOUND%28%3FitemDescription%29%29%0A%7D%0AGROUP%20BY%20%3FitemDescription%0AORDER%20BY%20DESC%28%3Fcount%29
'an': 'especie d\'insecto',
'bg': 'вид насекомо',
'bn': 'কীটপতঙ্গের প্রজাতি',
'ca': "espècie d'insecte",
'en': 'species of insect',
'es': 'especie de insecto',
'et': 'putukaliik',
'fr': 'espèce d\'insectes',
'gl': 'especie de insecto',
'hy': 'միջատների տեսակ',
'id': 'spesies serangga',
'io': 'speco di insekto',
'nb': 'insektart',
'nn': 'insektart',
'pt': 'espécie de inseto',
'pt-br': 'espécie de inseto',
'ro': 'specie de insecte',
'ru': 'вид насекомых',
'sq': 'lloj i insekteve',
'ta': 'பூச்சி இனம்',
'he': 'מין של חרק',
},
'species of mollusc': {
'bn': 'মলাস্কার প্রজাতি',
'ca': 'espècie de mol·lusc',
'en': 'species of mollusc',
'es': 'especie de molusco',
'et': 'limuseliik',
'gl': 'especie de molusco',
'io': 'speco di molusko',
'pt': 'espécie de molusco',
'he': 'מין של רכיכה',
'ro': 'specie de moluște',
'ru': 'вид моллюсков',
'sq': 'lloj i molusqeve',
},
'species of plant': {
'bg': 'вид растение',
'bn': 'উদ্ভিদের প্রজাতি',
'ca': 'espècie de planta',
'en': 'species of plant',
'es': 'especie de planta',
'et': 'taimeliik',
'gl': 'especie de planta',
'hy': 'բույսերի տեսակ',
'he': 'מין של צמח',
'io': 'speco di planto',
'ro': 'specie de plante',
'ru': 'вид растений',
'sq': 'lloj i bimëve',
},
'television series': {
'ca': 'sèrie de televisió',
'en': 'television series',
'eo': 'televida serio',
'es': 'serie de televisión',
'fr': 'série télévisée',
'hu': 'televíziós sorozat',
'it': 'serie televisiva',
},
'village in China': {
'an': 'pueblo d\'a Republica Popular de China', #or 'pueblo de China'
'ar': 'قرية في الصين',
'as': 'চীনৰ এখন গাওঁ',
'bn': 'চীনের একটি গ্রাম',
'bpy': 'চীনর আহান গাঙ',
'ca': 'poble de la Xina',
'cy': 'pentref yn Tsieina',
'de': 'Dorf in China',
'el': 'οικισμός της Λαϊκής Δημοκρατίας της Κίνας',
'en': 'village in China',
'eo': 'vilaĝo en Ĉinio',
'es': 'aldea de la República Popular China',
'et': 'küla Hiinas',
'fi': 'kylä Kiinassa',
'fr': 'village chinois',
'fy': 'doarp yn Sina',
'gu': 'ચીનનું ગામ',
'he': 'כפר ברפובליקה העממית של סין',
'hi': 'चीन का गाँव',
'hy': 'գյուղ Չինաստանում',
'id': 'desa di Tiongkok',
'io': 'vilajo en Chinia',
'it': 'villaggio cinese',
'ja': '中国の村',
'kn': 'ಚೈನಾ ದೇಶದ ಗ್ರಾಮ',
'mr': 'चीनमधील गाव',
'nb': 'landsby i Kina',
'ne': 'चीनका गाउँहरू',
'nn': 'landsby i Kina',
'nl': 'dorp in China',
'oc': 'vilatge chinés',
'or': 'ଚୀନର ଗାଁ',
'pt-br': 'vila chinesa',
'ur': 'چین کا گاؤں',
'ro': 'sat din China',
'ru': 'деревня КНР',
'sq': 'fshat në Kinë',
'ta': 'சீனாவின் கிராமம்',
'te': 'చైనాలో గ్రామం',
},
'Wikimedia category': { #Q4167836
'ace': 'kawan Wikimèdia',
'af': 'Wikimedia-kategorie',
'an': 'categoría de Wikimedia',
'ar': 'تصنيف ويكيميديا',
'arz': 'تصنيف بتاع ويكيميديا',
'ast': 'categoría de Wikimedia',
'ba': 'Викимедиа категорияһы',
'bar': 'Wikimedia-Kategorie',
'be': 'катэгорыя ў праекце Вікімедыя',
'be-tarask': 'катэгорыя ў праекце Вікімэдыя',
'bg': 'категория на Уикимедия',
'bho': 'विकिपीडिया:श्रेणी',
'bjn': 'tumbung Wikimedia',
'bn': 'উইকিমিডিয়া বিষয়শ্রেণী',
'br': 'pajenn rummata eus Wikimedia',
'bs': 'kategorija na Wikimediji',
'bug': 'kategori Wikimedia',
'ca': 'categoria de Wikimedia',
#'ce': 'Викимедиа проектан категореш',
#'ceb': 'Wikimedia:Kategorisasyon',
'ckb': 'پۆلی ویکیمیدیا',
'cs': 'kategorie na projektech Wikimedia',
'cy': 'tudalen categori Wikimedia',
'da': 'Wikimedia-kategori',
'de': 'Wikimedia-Kategorie',
'de-at': 'Wikimedia-Kategorie',
'de-ch': 'Wikimedia-Kategorie',
'dty': 'विकिमिडिया श्रेणी',
'el': 'κατηγορία εγχειρημάτων Wikimedia',
'en': 'Wikimedia category',
'en-ca': 'Wikimedia category',
'en-gb': 'Wikimedia category',
'eo': 'kategorio en Vikimedio',
'es': 'categoría de Wikimedia',
'et': 'Wikimedia kategooria',
'eu': 'Wikimediako kategoria',
'fa': 'ردهٔ ویکیپدیا',
'fi': 'Wikimedia-luokka',
'fo': 'Wikimedia-bólkur',
'fr': 'page de catégorie de Wikimedia',
'fy': 'Wikimedia-kategory',
'ga': 'Viciméid catagóir',
'gl': 'categoría de Wikimedia',
'gn': 'Vikimédia ñemohenda',
'gsw': 'Wikimedia-Kategorie',
'gu': 'વિકિપીડિયા શ્રેણી',
'gv': 'Wikimedia:Ronnaghys',
'he': 'דף קטגוריה',
'hi': 'विकिमीडिया श्रेणी',
'hr': 'kategorija na Wikimediji',
'hsb': 'kategorija w projektach Wikimedije',
'hu': 'Wikimédia-kategória',
'hy': 'Վիքիմեդիայի նախագծի կատեգորիա',
'ia': 'categoria Wikimedia',
'id': 'kategori Wikimedia',
'ilo': 'kategoria ti Wikimedia',
'it': 'categoria di un progetto Wikimedia',
'ja': 'ウィキメディアのカテゴリ',
'jv': 'kategori Wikimedia',
'ka': 'ვიკიპედია:კატეგორიზაცია',
'ko': '위키미디어 분류',
'ku': 'Wîkîmediya:Kategorî',
'kw': 'Wikimedia:Klassys',
'ky': 'Wikimedia категориясы',
'la': 'categoria Vicimediorum',
'lb': 'Wikimedia-Kategorie',
'li': 'Wikimedia-categorie',
'lv': 'Wikimedia projekta kategorija',
'map-bms': 'kategori Wikimedia',
'min': 'kategori Wikimedia',
'mk': 'Викимедиина категорија',
'ml': 'വിക്കിമീഡിയ വർഗ്ഗം',
'mn': 'категорияд Ангилал',
'mr': 'विकिपीडिया वर्ग',
'ms': 'kategori Wikimedia',
'my': 'Wikimedia:ကဏ္ဍခွဲခြင်း',
'nap': 'categurìa \'e nu pruggette Wikimedia',
'nb': 'Wikimedia-kategori',
'nds': 'Wikimedia-Kategorie',
'nds-nl': 'Wikimedia-kategorie',
'ne': 'विकिमिडिया श्रेणी',
'nl': 'Wikimedia-categorie',
'nn': 'Wikimedia-kategori',
'pam': 'Kategoriya ning Wikimedia',
'pl': 'kategoria w projekcie Wikimedia',
'ps': 'د ويکيمېډيا وېشنيزه',
'pt': 'categoria de um projeto da Wikimedia',
'pt-br': 'categoria de um projeto da Wikimedia',
'rmy': 'Vikipidiya:Shopni',
'ro': 'categorie în cadrul unui proiect Wikimedia',
'ru': 'категория в проекте Викимедиа',
'scn': 'catigurìa di nu pruggettu Wikimedia',
'sco': 'Wikimedia category',
'sd': 'زمرو:وڪيپيڊيا زمرا بندي',
'se': 'Wikimedia-kategoriija',
'sh': 'Wikimedia:Kategorija',
'si': 'විකිමීඩියා ප්රභේද පිටුව',
'sk': 'kategória projektov Wikimedia',
'sl': 'kategorija Wikimedije',
'sq': 'kategori e Wikimedias',
'sr': 'категорија на Викимедији',
'stq': 'Wikimedia-Kategorie',
'su': 'kategori Wikimédia',
'sv': 'Wikimedia-kategori',
'sw': 'jamii ya Wikimedia',
'ta': 'விக்கிமீடியப் பகுப்பு',
'tg': 'гурӯҳи Викимедиа',
'tg-cyrl': 'гурӯҳ дар лоиҳаи Викимедиа',
'tg-latn': 'gurühi Vikimedia',
'th': 'หน้าหมวดหมู่วิกิมีเดีย',
'tl': 'kategorya ng Wikimedia',
'tr': 'Vikimedya kategorisi',
'tt': 'Викимедиа проектындагы төркем',
'tt-cyrl': 'Викимедиа проектындагы төркем',
'tt-latn': 'Wikimedia proyektındağı törkem',
'uk': 'категорія проекту Вікімедіа',
'ur': 'ویکیمیڈیا زمرہ',
'vi': 'thể loại Wikimedia',
'yo': 'ẹ̀ka Wikimedia',
'yue': '維基媒體分類',
'zea': 'Wikimedia-categorie',
'zh': '维基媒体分类',
'zh-cn': '维基媒体分类',
'zh-hans': '维基媒体分类',
'zh-hant': '維基媒體分類',
'zh-hk': '維基媒體分類',
'zh-mo': '維基媒體分類',
'zh-my': '维基媒体分类',
'zh-sg': '维基媒体分类',
'zh-tw': '維基媒體分類',
},
'Wikimedia disambiguation page': { #Q4167410
'an': 'pachina de desambigación',
'ar': 'صفحة توضيح لويكيميديا',
'as': 'ৱিকিমিডিয়া দ্ব্যৰ্থতা দূৰীকৰণ পৃষ্ঠা',
'bg': 'Уикимедия пояснителна страница',
'bn': 'উইকিমিডিয়ার দ্ব্যর্থতা নিরসন পাতা',
'bs': 'čvor stranica na Wikimediji',
'ca': 'pàgina de desambiguació de Wikimedia',
'ckb': 'پەڕەی ڕوونکردنەوەی ویکیمیدیا',
'cs': 'rozcestník na projektech Wikimedia',
'da': 'Wikimedia-flertydigside',
'de': 'Wikimedia-Begriffsklärungsseite',
'de-at': 'Wikimedia-Begriffsklärungsseite',
'de-ch': 'Wikimedia-Begriffsklärungsseite',
'el': 'σελίδα αποσαφήνισης εγχειρημάτων Wikimedia',
'en': 'Wikimedia disambiguation page',
'en-ca': 'Wikimedia disambiguation page',
'en-gb': 'Wikimedia disambiguation page',
'eo': 'Vikimedia apartigilo',
'es': 'página de desambiguación de Wikimedia',
'et': 'Wikimedia täpsustuslehekülg',
'eu': 'Wikimediako argipen orri',
'fa': 'یک صفحهٔ ابهام\u200cزدایی در ویکی\u200cپدیا',
'fi': 'Wikimedia-täsmennyssivu',
'fr': 'page d\'homonymie de Wikimedia',
'fy': 'Wikimedia-betsjuttingsside',
'gl': 'páxina de homónimos de Wikimedia',
'gsw': 'Wikimedia-Begriffsklärigssite',
'gu': 'સ્પષ્ટતા પાનું',
'he': 'דף פירושונים',
'hi': 'बहुविकल्पी पृष्ठ',
'hr': 'razdvojbena stranica na Wikimediji',
'hu': 'Wikimédia-egyértelműsítőlap',
'hy': 'Վիքիմեդիայի նախագծի բազմիմաստության փարատման էջ',
'id': 'halaman disambiguasi Wikimedia',
'is': 'aðgreiningarsíða á Wikipediu',
'it': 'pagina di disambiguazione di un progetto Wikimedia',
'ja': 'ウィキメディアの曖昧さ回避ページ',
'ka': 'მრავალმნიშვნელოვანი',
'kn': 'ದ್ವಂದ್ವ ನಿವಾರಣೆ',
'ko': '위키미디어 동음이의어 문서',
'lb': 'Wikimedia-Homonymiesäit',
'li': 'Wikimedia-verdudelikingspazjena',
'lv': 'Wikimedia projekta nozīmju atdalīšanas lapa',
'min': 'laman disambiguasi',
'mk': 'појаснителна страница',
'ml': 'വിക്കിപീഡിയ വിവക്ഷ താൾ',
'mr': 'निःसंदिग्धीकरण पाने',
'ms': 'laman nyahkekaburan',
'nb': 'Wikimedia-pekerside',
'nds': 'Sied för en mehrdüdig Begreep op Wikimedia',
'nl': 'Wikimedia-doorverwijspagina',
'nn': 'Wikimedia-fleirtydingsside',
'or': 'ବହୁବିକଳ୍ପ ପୃଷ୍ଠା',
'pa': 'ਵਿਕੀਮੀਡੀਆ ਗੁੰਝਲਖੋਲ੍ਹ ਸਫ਼ਾ',
'pl': 'strona ujednoznaczniająca w projekcie Wikimedia',
'pt': 'página de desambiguação da Wikimedia',
'ro': 'pagină de dezambiguizare Wikimedia',
'ru': 'страница значений в проекте Викимедиа',
'sco': 'Wikimedia disambiguation page',
'sk': 'rozlišovacia stránka',
'sl': 'razločitvena stran Wikimedije',
'sq': 'faqe kthjelluese e Wikimedias',
'sr': 'вишезначна одредница на Викимедији',
'sv': 'Wikimedia-förgreningssida',
'te': 'వికీమీడియా అయోమయ నివృత్తి పేజీ',
'tg': 'саҳифаи маъноҳои Викимедиа',
'tg-cyrl': 'саҳифаи маъноҳои Викимедиа',
'tg-latn': "sahifai ma'nohoi Vikimedia",
'tr': 'Vikimedya anlam ayrımı sayfası',
'tt': 'Мәгънәләр бите Викимедиа проектында',
'tt-cyrl': 'Мәгънәләр бите Викимедиа проектында',
'tt-latn': 'Mäğnälär bite Wikimedia proyektında',
'uk': 'сторінка значень у проекті Вікімедіа',
'vi': 'trang định hướng Wikimedia',
'yo': 'ojúewé ìṣojútùú Wikimedia',
'yue': '維基媒體搞清楚頁',
'zea': 'Wikimedia-deurverwiespagina',
'zh': '维基媒体消歧义页',
'zh-cn': '维基媒体消歧义页',
'zh-hans': '维基媒体消歧义页',
'zh-hant': '維基媒體消歧義頁',
'zh-hk': '維基媒體消歧義頁',
'zh-mo': '維基媒體消歧義頁',
'zh-my': '维基媒体消歧义页',
'zh-sg': '维基媒体消歧义页',
'zh-tw': '維基媒體消歧義頁',
},
'Wikimedia list article': { #Q13406463
'ace': 'teunuléh dapeuta Wikimèdia',
'af': 'Wikimedia lysartikel',
'an': 'articlo de lista de Wikimedia',
'ar': 'قائمة ويكيميديا',
'as': 'ৱিকিপিডিয়া:ৰচনাশৈলীৰ হাতপুথি',
'ast': 'artículu de llista de Wikimedia',
'ba': 'Wikimedia-Listn',
'be': 'спіс артыкулаў у адным з праектаў Вікімедыя',
'bn': 'উইকিমিডিয়ার তালিকা নিবন্ধ',
'bs': 'spisak na Wikimediji',
'ca': 'article de llista de Wikimedia',
'cs': 'seznam na projektech Wikimedia',
'da': 'Wikimedia liste',
'de': 'Wikimedia-Liste',
'de-at': 'Wikimedia-Liste',
'de-ch': 'Wikimedia-Liste',
'el': 'κατάλογος εγχειρήματος Wikimedia',
'en': 'Wikimedia list article',
'en-ca': 'Wikimedia list article',
'en-gb': 'Wikimedia list article',
'eo': 'listartikolo en Vikimedio',
'es': 'artículo de lista de Wikimedia',
'et': 'Wikimedia loend',
'eu': 'Wikimediako zerrenda artikulua',
'fi': 'Wikimedia-luetteloartikkeli',
'fr': 'page de liste de Wikimedia',
'fy': 'Wikimedia-list',
'gl': 'artigo de listas da Wikimedia',
'he': 'רשימת ערכים',
'hr': 'popis na Wikimediji',
'hy': 'Վիքիմեդիայի նախագծի ցանկ',
'id': 'artikel daftar Wikimedia',
'ia': 'lista de un projecto de Wikimedia',
'it': 'lista di un progetto Wikimedia',
'ja': 'ウィキメディアの一覧記事',
'ko': '위키미디어 목록 항목',
'lb': 'Wikimedia-Lëschtenartikel',
'li': 'Wikimedia-lies',
'mk': 'список на статии на Викимедија',
'ms': 'rencana senarai Wikimedia',
'nb': 'Wikimedia-listeartikkel',
'nl': 'Wikimedia-lijst',
'nn': 'Wikimedia-listeartikkel',
'oc': 'lista d\'un projècte Wikimèdia',
'pl': 'lista w projekcie Wikimedia',
'ro': 'articol-listă în cadrul unui proiect Wikimedia',
'ru': 'статья-список в проекте Викимедиа',
'sco': 'Wikimedia leet airticle',
'si': 'විකිමීඩියා ලැයිස්තු ලිපිය',
'sk': 'zoznamový článok projektov Wikimedia',
'sl': 'seznam Wikimedije',
'sq': 'artikull-listë e Wikimedias',
'sr': 'списак на Викимедији',
'sv': 'Wikimedia-listartikel',
'ta': 'விக்கிப்பீடியா:பட்டியலிடல்',
'tg': 'саҳифаи феҳристӣ',
'tg-cyrl': 'мақолаи феҳристӣ',
'tg-latn': 'sahifai fehristī',
'th': 'บทความรายชื่อวิกิมีเดีย',
'tr': 'Vikimedya liste maddesi',
'uk': 'сторінка-список у проекті Вікімедіа',
'vi': 'bài viết danh sách Wikimedia',
'yi': 'וויקימעדיע ליסטע',
'yo': 'ojúewé àtojọ Wikimedia',
'zea': 'Wikimedia-lieste',
'zh': '维基媒体列表条目',
'zh-cn': '维基媒体列表条目',
'zh-hans': '维基媒体列表条目',
'zh-hant': '維基媒體列表條目',
'zh-hk': '維基媒體列表條目',
'zh-mo': '維基媒體列表條目',
'zh-my': '维基媒体列表条目',
'zh-sg': '维基媒体列表条目',
'zh-tw': '維基媒體列表條目'
},
'Wikimedia template': { #Q11266439
'an': 'plantilla de Wikimedia',
'ar': 'قالب ويكيميديا',
'arz': 'ويكيبيديا:قوالب',
'ast': 'plantía de proyectu',
'ba': 'Викимедиа ҡалыбы',
'bar': 'Wikimedia-Vorlog',
'be': 'шаблон праекта Вікімедыя',
'be-tarask': 'шаблён праекту Вікімэдыя',
'bg': 'Уикимедия шаблон',
'bn': 'উইকিমিডিয়া টেমপ্লেট',
'bs': 'šablon Wikimedia',
'ca': 'plantilla de Wikimedia',
'ce': 'Викимедин проектан кеп',
'cs': 'šablona na projektech Wikimedia',
'cy': 'nodyn Wikimedia',
'da': 'Wikimedia-skabelon',
'de': 'Wikimedia-Vorlage',
'el': 'Πρότυπο εγχειρήματος Wikimedia',
'en': 'Wikimedia template',
'en-ca': 'Wikimedia template',
'en-gb': 'Wikimedia template',
'eo': 'Vikimedia ŝablono',
'es': 'plantilla de Wikimedia',
'et': 'Wikimedia mall',
'eu': 'Wikimediako txantiloia',
'fa': 'الگوی ویکیمدیا',
'fi': 'Wikimedia-malline',
'fo': 'fyrimynd Wikimedia',
'fr': 'modèle de Wikimedia',
'frr': 'Wikimedia-föörlaag',
'fy': 'Wikimedia-berjocht',
'gl': 'modelo da Wikimedia',
'gsw': 'Wikimedia-Vorlage',
'gu': 'વિકિપીડિયા ઢાંચો',
'he': 'תבנית של ויקימדיה',
'hu': 'Wikimédia-sablon',
'hy': 'Վիքիմեդիայի նախագծի կաղապար',
'id': 'templat Wikimedia',
'ilo': 'plantilia ti Wikimedia',
'it': 'template di un progetto Wikimedia',
'ja': 'ウィキメディアのテンプレート',
'jv': 'cithakan Wikimedia',
'ka': 'ვიკიმედიის თარგი',
'ko': '위키미디어 틀',
'ku-latn': 'şablona Wîkîmediyayê',
'la': 'formula Vicimediorum',
'lb': 'Wikimedia-Schabloun',
'li': 'Wikimedia-sjabloon',
'lt': 'Vikimedijos šablonas',
'lv': 'Wikimedia projekta veidne',
'mk': 'шаблон на Викимедија',
'ml': 'വിക്കിമീഡിയ ഫലകം',
'mr': 'विकिपीडिया:साचा',
'ms': 'Templat Wikimedia',
'nb': 'Wikimedia-mal',
'nds': 'Wikimedia-Vörlaag',
'nds-nl': 'Wikimedia-mal',
'nl': 'Wikimedia-sjabloon',
'nn': 'Wikimedia-mal',
'oc': 'modèl de Wikimèdia',
'or': 'ଉଇକିମିଡ଼ିଆ ଛାଞ୍ଚ',
'pam': 'Ulmang pang-Wikimedia',
'pl': 'szablon w projekcie Wikimedia',
'ps': 'ويکيمېډيا کينډۍ',
'pt': 'predefinição da Wikimedia',
'pt-br': 'predefinição da Wikimedia',
'ro': 'format Wikimedia',
'ru': 'шаблон проекта Викимедиа',
'sco': 'Wikimedia template',
'se': 'Wikimedia-málle',
'sk': 'šablóna projektov Wikimedia',
'sq': 'stampë e Wikimedias',
'sr': 'Викимедијин шаблон',
'sr-ec': 'Викимедијин шаблон',
'stq': 'Wikimedia-Foarloage',
'sv': 'Wikimedia-mall',
'sw': 'kigezo cha Wikimedia',
'ta': 'விக்கிமீடியா வார்ப்புரு',
'te': 'వికీమీడియా మూస',
'tg': 'шаблони лоиҳаи Викимедиа',
'tg-cyrl': 'шаблони лоиҳаи Викимедиа',
'tg-latn': 'shabloni loihai Vikimedia',
'th': 'หน้าแม่แบบวิกิมีเดีย',
'tl': 'Padrong pang-Wikimedia',
'tr': 'Vikimedya şablonu',
'uk': 'шаблон у проекті Вікімедіа',
'vi': 'bản mẫu Wikimedia',
'yo': 'àdàkọ Wikimedia',
'yue': '維基媒體模',
'zea': 'Wikimedia-sjabloon',
'zh': '维基媒体模板',
'zh-cn': '维基媒体模板',
'zh-hans': '维基媒体模板',
'zh-hant': '維基媒體模板',
'zh-hk': '維基媒體模板',
'zh-tw': '維基媒體模板',
},
'Wikinews article': { #Q17633526
'an': 'articlo de Wikinews',
'ar': 'مقالة ويكي أخبار',
'bar': 'Artike bei Wikinews',
'bn': 'উইকিসংবাদের নিবন্ধ',
'bs': 'Wikinews članak',
'ca': 'article de Viquinotícies',
'cs': 'článek na Wikizprávách',
'da': 'Wikinews-artikel',
'de': 'Artikel bei Wikinews',
'el': 'Άρθρο των Βικινέων',
'en': 'Wikinews article',
'en-ca': 'Wikinews article',
'en-gb': 'Wikinews article',
'eo': 'artikolo de Vikinovaĵoj',
'es': 'artículo de Wikinoticias',
'eu': 'Wikialbisteakeko artikulua',
'fi': 'Wikiuutisten artikkeli',
'fr': 'article de Wikinews',
'fy': 'Wikinews-artikel',
'he': 'כתבה בוויקיחדשות',
'hu': 'Wikihírek-cikk',
'hy': 'Վիքիլուրերի հոդված',
'id': 'artikel Wikinews',
'it': 'articolo di Wikinotizie',
'ja': 'ウィキニュースの記事',
'ko': '위키뉴스 기사',
'ku-latn': 'gotara li ser Wîkînûçeyê',
'li': 'Wikinews-artikel',
'lt': 'Vikinaujienų straipsnis',
'mk': 'напис на Викивести',
'nb': 'Wikinytt-artikkel',
'nl': 'Wikinieuws-artikel',
'nn': 'Wikinytt-artikkel',
'or': 'ଉଇକି ସୂଚନା ପତ୍ରିକା',
'pl': 'artykuł w Wikinews',
'ps': 'د ويکيخبرونو ليکنه',
'pt': 'artigo do Wikinotícias',
'ro': 'articol în Wikiștiri',
'ru': 'статья Викиновостей',
'sq': 'artikull i Wikinews',
'sr': 'чланак са Викивести',
'sv': 'Wikinews-artikel',
'te': 'వికీవార్త వ్యాసం',
'tg': 'саҳифаи Викиахбор',
'tg-cyrl': 'саҳифаи Викиахбор',
'tg-latn': 'sahifai Vikiakhbor',
'th': 'เนื้อหาวิกิข่าว',
'tr': 'Vikihaber maddesi',
'uk': 'стаття Вікіновин',
'zea': 'Wikinews-artikel',
'zh': '維基新聞新聞稿',
'zh-cn': '维基新闻新闻稿',
'zh-hans': '维基新闻新闻稿',
'zh-hant': '維基新聞新聞稿',
'zh-hk': '維基新聞新聞稿',
'zh-mo': '維基新聞新聞稿',
'zh-my': '维基新闻新闻稿',
'zh-sg': '维基新闻新闻稿',
'zh-tw': '維基新聞新聞稿',
},
'year': {
'af': 'jaar',
'an': 'anyo',
'ar': 'سنة',
'ast': 'añu',
'be': 'год',
'be-tarask': 'год',
'bg': 'година',
'bn': 'বছর',
'br': 'bloavezh',
'bs': 'godina',
'ca': 'any',
'cs': 'rok',
'cy': 'blwyddyn',
'da': 'år',
'de': 'Jahr',
'el': 'έτος',
'en': 'year',
'en-ca': 'year',
'en-gb': 'year',
'eo': 'jaro',
'es': 'año',
'et': 'aasta',
'fi': 'vuosi',
'fr': 'année',
'fy': 'jier',
'gl': 'ano',
'gsw': 'joor',
'he': 'שנה',
'hr': 'Godina',
'ht': 'Lane',
'hu': 'Év',
'hy': 'տարեթիվ',
'ia': 'anno',
'id': 'tahun',
'ilo': 'tawen',
'io': 'yaro',
'is': 'ár',
'it': 'anno',
'ja': '年',
'ka': 'წელი',
'ko': '연도',
'ku': 'Sal',
'la': 'annus',
'lt': 'Metai',
'lv': 'gads',
'mhr': 'Идалык',
'min': 'taun',
'mk': 'година',
'ms': 'Tahun',
'nan': 'nî',
'nb': 'år',
'nds': 'Johr',
'nl': 'jaar',
'nn': 'år',
'or': 'ବର୍ଷ',
'pl': 'rok',
'pt': 'ano',
'ro': 'an',
'ru': 'год',
'sh': 'godina',
'sk': 'Rok',
'sl': 'Leto',
#'sq': 'vit', or viti?
'sr': 'Година',
'srn': 'Yari',
'sv': 'år',
'th': 'ปี',
'tl': 'taon',
'tr': 'yıl',
'uk': 'рік',
'vo': 'yel',
'vi': 'năm',
'war': 'Tuig',
'yi': 'יאר',
'yue': '年',
'zh': '年',
'zh-hans': '年份',
'zh-hant': '年份',
},
}
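# The description templates of the form '<feature> in ~' below are generated automatically,
# one per country, by genTranslationsByCountry() and merged into the translations table above.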
autotranslations = []
autotranslations.append(genTranslationsByCountry(desc='bay in ~'))
autotranslations.append(genTranslationsByCountry(desc='bight in ~'))
autotranslations.append(genTranslationsByCountry(desc='cape in ~'))
autotranslations.append(genTranslationsByCountry(desc='cave in ~'))
autotranslations.append(genTranslationsByCountry(desc='dune in ~'))
autotranslations.append(genTranslationsByCountry(desc='glacier in ~'))
autotranslations.append(genTranslationsByCountry(desc='hill in ~'))
autotranslations.append(genTranslationsByCountry(desc='island in ~'))
autotranslations.append(genTranslationsByCountry(desc='lagoon in ~'))
autotranslations.append(genTranslationsByCountry(desc='lake in ~'))
autotranslations.append(genTranslationsByCountry(desc='mine in ~'))
autotranslations.append(genTranslationsByCountry(desc='mountain in ~'))
autotranslations.append(genTranslationsByCountry(desc='plain in ~'))
autotranslations.append(genTranslationsByCountry(desc='reef in ~'))
autotranslations.append(genTranslationsByCountry(desc='reservoir in ~'))
autotranslations.append(genTranslationsByCountry(desc='river in ~'))
autotranslations.append(genTranslationsByCountry(desc='road in ~'))
autotranslations.append(genTranslationsByCountry(desc='spring in ~'))
autotranslations.append(genTranslationsByCountry(desc='stream in ~'))
autotranslations.append(genTranslationsByCountry(desc='swamp in ~'))
autotranslations.append(genTranslationsByCountry(desc='valley in ~'))
autotranslations.append(genTranslationsByCountry(desc='watercourse in ~'))
for autotranslation in autotranslations:
for k, v in autotranslation.items():
translations[k] = v
site = pywikibot.Site('wikidata', 'wikidata')
repo = site.data_repository()
querylimit = 10000
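# Each topic below maps to a list of SPARQL queries. The recurring pattern
#   GROUP BY ?item HAVING(COUNT(?instance) = 1)
# keeps only items that have exactly one value of the counted property (P31 in most queries,
# P105 in the genus queries), and LIMIT/OFFSET pages through large result sets in chunks
# of `querylimit` rows.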
queries = {
'asteroid': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q3863 ;
wdt:P31 ?instance .
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(1, 300000, querylimit)
],
'chemical compound': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q11173 ;
wdt:P31 ?instance .
#?item schema:description "chemical compound"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(1, 250000, querylimit)
],
'douar in Morocco': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q23925393 ;
wdt:P31 ?instance .
?item schema:description "douar in Morocco"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(1, 50000, querylimit)
],
'encyclopedic article': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q17329259 ;
wdt:P31 ?instance .
?item schema:description "encyclopedic article"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(0, 500000, querylimit)
],
'entry in Dictionary of National Biography': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q19389637 ;
wdt:P31 ?instance .
?item schema:description "entry in Dictionary of National Biography"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(0, 50000, querylimit)
],
'extrasolar planet': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q44559 ;
wdt:P31 ?instance .
?item schema:description "extrasolar planet"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(0, 10000, querylimit)
],
'family name': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q101352 ;
wdt:P31 ?instance .
?item schema:description "family name"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(0, 200000, querylimit)
],
'family of crustaceans': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q16521 ;
wdt:P31 ?instance .
?item wdt:P105 wd:Q35409.
?item schema:description "family of crustaceans"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
"""
],
'family of insects': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q16521 ;
wdt:P31 ?instance .
?item wdt:P105 wd:Q35409.
?item schema:description "family of insects"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
"""
],
'family of molluscs': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q16521 ;
wdt:P31 ?instance .
?item wdt:P105 wd:Q35409.
?item schema:description "family of molluscs"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
"""
],
'family of plants': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q16521 ;
wdt:P31 ?instance .
?item wdt:P105 wd:Q35409.
?item schema:description "family of plants"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
"""
],
'female given name': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q11879590 ;
wdt:P31 ?instance .
?item schema:description "female given name"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
"""
],
'galaxy': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q318 ;
wdt:P31 ?instance .
?item schema:description "galaxy"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
"""
],
'genus of algae': [
"""
SELECT ?item
WHERE {
?item wdt:P105 wd:Q34740 ;
wdt:P105 ?instance .
?item schema:description "genus of algae"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(0, 10000, querylimit)
],
'genus of amphibians': [
"""
SELECT ?item
WHERE {
?item wdt:P105 wd:Q34740 ;
wdt:P105 ?instance .
?item schema:description "genus of amphibians"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(0, 1000, querylimit)
],
'genus of arachnids': [
"""
SELECT ?item
WHERE {
?item wdt:P105 wd:Q34740 ;
wdt:P105 ?instance .
?item schema:description "genus of arachnids"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(0, 20000, querylimit)
],
'genus of birds': [
"""
SELECT ?item
WHERE {
?item wdt:P105 wd:Q34740 ;
wdt:P105 ?instance .
?item schema:description "genus of birds"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(0, 5000, querylimit)
],
'genus of fishes': [
"""
SELECT ?item
WHERE {
?item wdt:P105 wd:Q34740 ;
wdt:P105 ?instance .
?item schema:description "genus of fishes"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(0, 10000, querylimit)
],
'genus of fungi': [
"""
SELECT ?item
WHERE {
?item wdt:P105 wd:Q34740 ;
wdt:P105 ?instance .
?item schema:description "genus of fungi"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(0, 20000, querylimit)
],
'genus of insects': [
"""
SELECT ?item
WHERE {
?item wdt:P105 wd:Q34740 ;
wdt:P105 ?instance .
?item schema:description "genus of insects"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(0, 100000, querylimit)
],
'genus of mammals': [
"""
SELECT ?item
WHERE {
?item wdt:P105 wd:Q34740 ;
wdt:P105 ?instance .
?item schema:description "genus of mammals"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(0, 10000, querylimit)
],
'genus of molluscs': [
"""
SELECT ?item
WHERE {
?item wdt:P105 wd:Q34740 ;
wdt:P105 ?instance .
?item schema:description "genus of molluscs"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(0, 20000, querylimit)
],
'genus of plants': [
"""
SELECT ?item
WHERE {
?item wdt:P105 wd:Q34740 ;
wdt:P105 ?instance .
?item schema:description "genus of plants"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(0, 50000, querylimit)
],
'genus of reptiles': [
"""
SELECT ?item
WHERE {
?item wdt:P105 wd:Q34740 ;
wdt:P105 ?instance .
?item schema:description "genus of reptiles"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(0, 5000, querylimit)
],
'Hebrew calendar year': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q577 ;
wdt:P31 ?instance .
?item schema:description "Hebrew calendar year"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
"""
],
'Islamic calendar year': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q577 ;
wdt:P31 ?instance .
?item wdt:P361 wd:Q28892 .
?item schema:description "Islamic calendar year"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
"""
],
'male given name': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q12308941 ;
wdt:P31 ?instance .
?item schema:description "male given name"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
"""
],
#'natural number': ['https://query.wikidata.org/bigdata/namespace/wdq/sparql?query=SELECT%20%3Fitem%0AWHERE%20%7B%0A%09%3Fitem%20wdt%3AP31%20wd%3AQ21199%20.%0A%20%20%20%20FILTER%20NOT%20EXISTS%20%7B%20%3Fitem%20wdt%3AP31%20wd%3AQ200227%20%7D%20.%20%0A%20%20%20%20%3Fitem%20schema%3Adescription%20%22natural%20number%22%40en.%0A%7D%0A'],
#'scientific article': [''], # use scientific.articles.py // some people add the date https://www.wikidata.org/wiki/Q19983493
'species of alga': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q16521 ;
wdt:P31 ?instance .
?item wdt:P105 wd:Q7432.
?item schema:description "species of alga"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
"""
],
'species of amphibian': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q16521 ;
wdt:P31 ?instance .
?item wdt:P105 wd:Q7432.
?item schema:description "species of amphibian"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
"""
],
'species of arachnid': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q16521 ;
wdt:P31 ?instance .
?item wdt:P105 wd:Q7432.
?item schema:description "species of arachnid"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
"""
],
'species of insect': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q16521 ;
wdt:P31 ?instance .
?item wdt:P105 wd:Q7432.
?item schema:description "species of insect"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(0, 1000000, querylimit)
],
'species of mollusc': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q16521 ;
wdt:P31 ?instance .
?item wdt:P105 wd:Q7432.
?item schema:description "species of mollusc"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
"""
],
'species of plant': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q16521 ;
wdt:P31 ?instance .
?item wdt:P105 wd:Q7432.
?item schema:description "species of plant"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(0, 600000, querylimit)
],
'television series': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q5398426 ;
wdt:P31 ?instance .
?item schema:description "television series"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(0, 100000, querylimit)
],
#'village in China': ['https://query.wikidata.org/bigdata/namespace/wdq/sparql?query=SELECT%20%3Fitem%0AWHERE%0A%7B%0A%09%3Fitem%20wdt%3AP31%20wd%3AQ13100073%20%3B%0A%20%20%20%20%20%20%20%20%20%20wdt%3AP31%20%3Finstance%20.%0A%7D%0AGROUP%20BY%20%3Fitem%0AHAVING(COUNT(%3Finstance)%20%3D%201)'],
'Wikimedia category': [
"""
SELECT ?item
WHERE {
SERVICE bd:sample {
?item wdt:P31 wd:Q4167836 .
bd:serviceParam bd:sample.limit %s .
bd:serviceParam bd:sample.sampleType "RANDOM" .
}
?item schema:description "Wikimedia category"@en.
OPTIONAL { ?item schema:description ?itemDescription. FILTER(LANG(?itemDescription) = "%s"). }
FILTER (!BOUND(?itemDescription))
}
#random%s
""" % (str(querylimit+i), random.choice(list(translations['Wikimedia category'].keys())), random.randint(1,1000000)) for i in range(1, 10000)
],
'Wikimedia disambiguation page': [
"""
SELECT ?item
WHERE {
SERVICE bd:sample {
?item wdt:P31 wd:Q4167410 .
bd:serviceParam bd:sample.limit %s .
bd:serviceParam bd:sample.sampleType "RANDOM" .
}
?item schema:description "Wikimedia disambiguation page"@en.
OPTIONAL { ?item schema:description ?itemDescription. FILTER(LANG(?itemDescription) = "%s"). }
FILTER (!BOUND(?itemDescription))
}
#random%s
""" % (str(querylimit+i), random.choice(list(translations['Wikimedia disambiguation page'].keys())), random.randint(1,1000000)) for i in range(1, 10000)
],
'Wikimedia list article': [
"""
SELECT ?item
WHERE
{
?item wdt:P31 wd:Q13406463 ;
wdt:P31 ?instance .
?item schema:description "Wikimedia list article"@en.
#OPTIONAL { ?item schema:description ?itemDescription. FILTER(LANG(?itemDescription) = "es"). }
#FILTER (!BOUND(?itemDescription))
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(0, 500000, querylimit)
],
#'Wikimedia list article': ['https://query.wikidata.org/bigdata/namespace/wdq/sparql?query=SELECT%20%3Fitem%0AWHERE%0A%7B%0A%09%3Fitem%20wdt%3AP31%20wd%3AQ13406463%20%3B%0A%20%20%20%20%20%20%20%20%20%20wdt%3AP31%20%3Finstance%20.%0A%20%20%20%20%3Fitem%20schema%3Adescription%20%22Wikimedia%20list%20article%22%40en.%0A%20%20%20%20OPTIONAL%20%7B%20%3Fitem%20schema%3Adescription%20%3FitemDescription.%20FILTER(LANG(%3FitemDescription)%20%3D%20%22es%22).%20%20%7D%0A%09FILTER%20(!BOUND(%3FitemDescription))%0A%7D%0AGROUP%20BY%20%3Fitem%0AHAVING(COUNT(%3Finstance)%20%3D%201)'], #lists with language selector enabled
#'Wikimedia list article': ['https://query.wikidata.org/bigdata/namespace/wdq/sparql?query=SELECT%20%3Fitem%0AWHERE%0A%7B%0A%09%3Fitem%20wdt%3AP31%20wd%3AQ13406463%20%3B%0A%20%20%20%20%20%20%20%20%20%20wdt%3AP31%20%3Finstance%20.%0A%20%20%20%20%23%3Fitem%20schema%3Adescription%20%22Wikimedia%20list%20article%22%40en.%0A%20%20%20%20%23OPTIONAL%20%7B%20%3Fitem%20schema%3Adescription%20%3FitemDescription.%20FILTER(LANG(%3FitemDescription)%20%3D%20%22es%22).%20%20%7D%0A%09%23FILTER%20(!BOUND(%3FitemDescription))%0A%7D%0AGROUP%20BY%20%3Fitem%0AHAVING(COUNT(%3Finstance)%20%3D%201)'], #lists even without english description
'Wikimedia template': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q11266439 ;
wdt:P31 ?instance .
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
LIMIT %s
OFFSET %s
""" % (str(querylimit), str(offset)) for offset in range(0, 1000000, querylimit)
],
'Wikinews article': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q17633526 ;
wdt:P31 ?instance .
#?item schema:description "Wikinews article"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
"""
],
'year': [
"""
SELECT ?item
WHERE {
?item wdt:P31 wd:Q577 ;
wdt:P31 ?instance .
?item schema:description "year"@en.
}
GROUP BY ?item
HAVING(COUNT(?instance) = 1)
"""
],
}
autoqueries = []
autoqueries.append(genQueriesByCountry(p31='Q39594', desc='bay in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q17018380', desc='bight in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q185113', desc='cape in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q35509', desc='cave in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q25391', desc='dune in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q35666', desc='glacier in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q54050', desc='hill in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q23442', desc='island in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q187223', desc='lagoon in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q23397', desc='lake in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q820477', desc='mine in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q8502', desc='mountain in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q160091', desc='plain in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q184358', desc='reef in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q131681', desc='reservoir in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q4022', desc='river in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q34442', desc='road in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q124714', desc='spring in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q47521', desc='stream in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q166735', desc='swamp in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q39816', desc='valley in ~', desclang='en'))
autoqueries.append(genQueriesByCountry(p31='Q355304', desc='watercourse in ~', desclang='en'))
for autoquery in autoqueries:
for k, v in autoquery.items():
queries[k] = v
queries_list = [x for x in queries.keys()]
queries_list.sort()
skip = ''
topics = [ #uncomment topics you want to run the bot on
#'asteroid',
#'chemical compound',
#'douar in Morocco',
#'encyclopedic article',
#'family name',
#'female given name',
#'male given name',
#'family of crustaceans',
#'family of insects',
#'family of molluscs',
#'family of plants',
#'genus of algae',
#'genus of amphibians',
#'genus of arachnids',
#'genus of birds',
#'genus of fishes',
#'genus of fungi',
#'genus of insects',
#'genus of mammals',
#'genus of molluscs',
#'genus of plants',
#'genus of reptiles',
#'year',
#'Hebrew calendar year',
#'Islamic calendar year',
#'species of alga',
#'species of amphibian',
#'species of arachnid',
#'species of insect',
#'species of mollusc',
#'species of plant',
#'Wikimedia category',
#'Wikimedia disambiguation page',
#'Wikimedia list article',
#'Wikimedia template',
]
topicarg = ''
if len(sys.argv) > 1:
topicarg = sys.argv[1]
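# A topic can be selected either by uncommenting it in the `topics` list above or by passing it
# as the first command-line argument. Both the argument and the topic names are lower-cased with
# spaces replaced by '-' before comparison, and an argument ending in '-' acts as a prefix filter
# (e.g. 'genus-of-' selects every 'genus of ...' topic).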
for topic in queries_list:
topic_ = re.sub(' ', '-', topic.lower())
topicarg_ = re.sub(' ', '-', topicarg.lower())
if topicarg:
if topic_ != topicarg_ and not (topicarg_.endswith('-') and topic_.startswith(topicarg_)):
continue
elif not topic in topics:
continue
c = 0
ctotal = 0
for url in queries[topic]:
url = url.strip()
if not url.startswith('http'):
url = 'https://query.wikidata.org/bigdata/namespace/wdq/sparql?query=%s' % (urllib.parse.quote(url))
url = '%s&format=json' % (url)
print("Loading...", url)
sparql = getURL(url=url)
json1 = loadSPARQL(sparql=sparql)
qlist = []
for result in json1['results']['bindings']:
q = 'item' in result and result['item']['value'].split('/entity/')[1] or ''
if q:
qlist.append(q)
if not qlist: #empty query result? maybe no more Q
break
ctotal += len(qlist)
for q in qlist:
c += 1
print('\n== %s [%s] [%d of %d] ==' % (q, topic, c, ctotal))
if skip:
if q != skip:
print('Skipping...')
continue
else:
skip = ''
item = pywikibot.ItemPage(repo, q)
try: #to detect Redirect because .isRedirectPage fails
item.get()
except:
print('Error while .get()')
continue
#skipping items with en: sitelinks (temporary patch)
#sitelinks = item.sitelinks
#if 'enwiki' in sitelinks:
# continue
descriptions = item.descriptions
addedlangs = []
fixedlangs = []
for lang in translations[topic].keys():
if lang in descriptions.keys():
if topic in fixthiswhenfound and \
lang in fixthiswhenfound[topic] and \
descriptions[lang] in fixthiswhenfound[topic][lang]:
descriptions[lang] = translations[topic][lang]
fixedlangs.append(lang)
else:
descriptions[lang] = translations[topic][lang]
addedlangs.append(lang)
if addedlangs or fixedlangs:
data = { 'descriptions': descriptions }
addedlangs.sort()
fixedlangs.sort()
summary = 'BOT - '
if addedlangs:
if fixedlangs:
summary += 'Adding descriptions (%s languages): %s' % (len(addedlangs), ', '.join(addedlangs))
summary += ' / Fixing descriptions (%s languages): %s' % (len(fixedlangs), ', '.join(fixedlangs))
else:
summary += 'Adding descriptions (%s languages): %s' % (len(addedlangs), ', '.join(addedlangs))
else:
if fixedlangs:
summary += 'Fixing descriptions (%s languages): %s' % (len(fixedlangs), ', '.join(fixedlangs))
print(summary)
try:
item.editEntity(data, summary=summary)
#break
except:
print('Error while saving')
continue
print("Finished successfully")
if __name__ == "__main__":
main()
|
emijrp/wikidata
|
common.descriptions.py
|
Python
|
gpl-3.0
| 128,417
|
[
"Galaxy"
] |
07e2bbbea019d421185fb5ee9f124507c21901a6a9f004e90d9a74a9c623155b
|
# coding: utf-8
"""
Collection of low-level tools that facilitate the interface with resource managers.
The preferred way of importing this module is:
import qutils as qu
"""
from __future__ import print_function, division, unicode_literals
from monty.string import is_string
from pymatgen.core.units import Time, Memory
import logging
logger = logging.getLogger(__name__)
def slurm_parse_timestr(s):
"""
A slurm time parser. Accepts a string in one the following forms:
# "days-hours",
# "days-hours:minutes",
# "days-hours:minutes:seconds".
# "minutes",
# "minutes:seconds",
# "hours:minutes:seconds",
Returns:
Time in seconds.
Raises:
`ValueError` if string is not valid.
"""
days, hours, minutes, seconds = 0, 0, 0, 0
if isinstance(s, int):
return Time(s, "s")
if '-' in s:
# "days-hours",
# "days-hours:minutes",
# "days-hours:minutes:seconds".
days, s = s.split("-")
days = int(days)
if ':' not in s:
hours = int(float(s))
elif s.count(':') == 1:
hours, minutes = map(int, s.split(':'))
elif s.count(':') == 2:
hours, minutes, seconds = map(int, s.split(':'))
else:
raise ValueError("More that 2 ':' in string!")
else:
# "minutes",
# "minutes:seconds",
# "hours:minutes:seconds",
if ':' not in s:
minutes = int(float(s))
elif s.count(':') == 1:
minutes, seconds = map(int, s.split(':'))
elif s.count(':') == 2:
hours, minutes, seconds = map(int, s.split(':'))
else:
raise ValueError("More than 2 ':' in string!")
return Time((days*24 + hours)*3600 + minutes*60 + seconds, "s")
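# A minimal usage sketch (illustrative only; each call returns a pymatgen Time in seconds):
#
#   slurm_parse_timestr("2-12")    -> Time((2*24 + 12)*3600, "s")   # "days-hours"
#   slurm_parse_timestr("30")      -> Time(30*60, "s")              # "minutes"
#   slurm_parse_timestr("10:30")   -> Time(10*60 + 30, "s")         # "minutes:seconds"
#   slurm_parse_timestr("1:02:03") -> Time(3600 + 2*60 + 3, "s")    # "hours:minutes:seconds"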
def time2slurm(timeval, unit="s"):
"""
Convert a number representing a time value in the given unit (Default: seconds)
to a string following the slurm convention: "days-hours:minutes:seconds".
>>> assert time2slurm(61) == '0-0:1:1' and time2slurm(60*60+1) == '0-1:0:1'
>>> assert time2slurm(0.5, unit="h") == '0-0:30:0'
"""
d, h, m, s = 24*3600, 3600, 60, 1
timeval = Time(timeval, unit).to("s")
days, hours = divmod(timeval, d)
hours, minutes = divmod(hours, h)
minutes, secs = divmod(minutes, m)
return "%d-%d:%d:%d" % (days, hours, minutes, secs)
def time2pbspro(timeval, unit="s"):
"""
Convert a number representing a time value in the given unit (Default: seconds)
to a string following the PbsPro convention: "hours:minutes:seconds".
>>> assert time2pbspro(2, unit="d") == '48:0:0'
"""
h, m, s = 3600, 60, 1
timeval = Time(timeval, unit).to("s")
hours, minutes = divmod(timeval, h)
minutes, secs = divmod(minutes, m)
return "%d:%d:%d" % (hours, minutes, secs)
def time2loadlever(timeval, unit="s"):
"""
Convert a number representing a time value in the given unit (Default: seconds)
to a string following the LoadLeveler convention: hh:mm:ss (hours:minutes:seconds).
>>> assert time2loadlever(2, unit="d") == '48:00:00'
"""
h, m, s = 3600, 60, 1
timeval = Time(timeval, unit).to("s")
hours, minutes = divmod(timeval, h)
minutes, secs = divmod(minutes, m)
return "%d:%02d:%02d" % (hours, minutes, secs)
def timelimit_parser(s):
"""Convert a float or a string into time in seconds."""
try:
return Time(float(s), "s")
except ValueError:
return slurm_parse_timestr(s)
def any2mb(s):
"""Convert string or number to memory in megabytes."""
if is_string(s):
return int(Memory.from_string(s).to("Mb"))
else:
return int(s)
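# Hedged usage sketch (assumes pymatgen's Memory accepts unit strings such as "Gb"/"Mb",
# as implied by the Memory.from_string call above):
#
#   any2mb("2 Gb")               -> 2048
#   any2mb(512)                  -> 512
#   timelimit_parser("1:00:00")  -> Time(3600, "s")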
|
matk86/pymatgen
|
pymatgen/io/abinit/qutils.py
|
Python
|
mit
| 3,763
|
[
"pymatgen"
] |
6e3d0afbadba7efccd1d153ef30c661de6a416de21209213bd72cee7bb28b098
|
# -*- coding: utf-8 -*-
__author__ = 'Steven Christe'
from astropy.io import fits
from skimage import filter
from skimage.transform import hough_circle
from skimage.feature import peak_local_max
from skimage.draw import circle_perimeter
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from datetime import datetime
class image(object):
def __init__(self, filename):
"""
A class to handle a SAAS fits image.
:param filename: Path to SAAS fits file as a string.
properties
----------
data: holds the data in the fits file
header: holds the fits header (i.e. meta data)
roi: the region of interest [x1, x2, y1, y2]
fov: the field of view of the Region of Interest
calibrated_center: the calibrated center of the SAAS [x, y]
max_index: the index of the maximum pixel
exposure: exposure time in seconds
date: datetime object when image was taken
"""
self.filename = filename
try:
f = fits.open(filename)
except Exception:
print("Warning. Can't open file %f" % filename)
f = None
if f is not None:
self.data = f[1].data
self.data = self.data.astype(np.ubyte)
self.header = f[0].header
self.max_index = np.unravel_index(self.data.argmax(), np.shape(self.data))
self.fov = np.array([100, 100])
self.roi = None
# Set the default roi as the entire image
self.roi_reset()
self.calibrated_center = np.array([659, 483])
self.exposure = self.header.get('EXPTIME')
self.gain_preamp = self.header.get('GAIN_PRE')
self.gain_analog = self.header.get('GAIN_ANA')
self.date = datetime.strptime(self.header.get("DATE_OBS"), "%c")
self.max = np.max(self.data)
self.min = np.min(self.data)
self.std = np.std(self.data)
def imshow(self):
"""
Plot the image.
"""
ax = plt.imshow(self.roi_data, origin='upper', cmap=cm.Greys_r, vmin=0, vmax=255)
plt.title('FOXSI SAAS ' + self.header['DATE_OBS'])
ax.set_interpolation('nearest')
def overlay(self):
"""
Overplot a pre-defined overlay onto the image plot.
"""
plt.plot(self.calibrated_center[0], self.calibrated_center[1], 'x')
plt.plot(self.max_index[1], self.max_index[0], ".")
plt.axhline(self.calibrated_center[1], self.calibrated_center[1])
plt.axvline(self.calibrated_center[0], self.calibrated_center[0])
# circle1 = plt.circle(self.calibrated_center, 10)
#circle2 = plt.circle(self.calibrated_center, 20)
#circle3 = plt.circle(self.calibrated_center, 30)
#fig = plt.gcf()
#fig.gca().add_artist(circle1)
#fig.gca().add_artist(circle2)
#fig.gca().add_artist(circle3)
def roi_auto(self):
"""
Set the region of interest (ROI) automatically centered around the image max.
"""
self.roi = np.array([self.max_index[1] - self.fov[0] * 0.5, self.max_index[1] + self.fov[0] * 0.5,
self.max_index[0] - self.fov[1] * 0.5, self.max_index[0] + self.fov[1] * 0.5])
@property
def roi_data(self):
"""
The data inside the ROI.
:return: nd.array
"""
return self.data[self.roi[2]:self.roi[3], self.roi[0]:self.roi[1]]
def roi_reset(self):
"""
Set the region of interest (ROI) to the entire image.
"""
self.roi = [0, np.shape(self.data)[1], 0, np.shape(self.data)[0]]
def hist(self):
"""
Plot a histogram of the image.
"""
plt.hist(self.roi_data.ravel())
def set_fov(self, fov):
"""
Set the field of the view of the ROI.
"""
self.fov = fov
self.roi = np.array([self.max_index[1] - self.fov[0] * 0.5, self.max_index[1] + self.fov[0] * 0.5,
self.max_index[0] - self.fov[1] * 0.5, self.max_index[0] + self.fov[1] * 0.5])
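# Hedged usage sketch for the image class (the FITS file name below is hypothetical):
#
#   img = image('saas_frame.fits')
#   img.set_fov(np.array([200, 200]))   # widen the ROI centred on the brightest pixel
#   img.imshow()
#   img.overlay()
#   plt.show()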
def find_center(saas_image, sigma=0.8, num_circles=5):
"""
Find the center of the image
:param saas_image: A SAAS image object.
:param sigma: The amount of gaussian blurring
:param num_circles: The number of circles to find.
Returns: a 2x2 array [[mean, std], [mean, std]] with the averaged circle-centre coordinates and their scatter, in ROI pixels.
"""
edges = filter.canny(saas_image.roi_data, sigma=sigma)
hough_radii = np.arange(10, 70, 1)
hough_res = hough_circle(edges, hough_radii)
centers = []
accums = []
radii = []
for radius, h in zip(hough_radii, hough_res):
# For each radius, extract two circles
peaks = peak_local_max(h, num_peaks=2)
if len(peaks) > 0:  # peak_local_max returns an ndarray, so avoid comparing it to a list
centers.extend(peaks)
accums.extend(h[peaks[:, 0], peaks[:, 1]])
radii.extend([radius, radius])
best_centers = []
best_radii = []
best_x = []
best_y = []
number_of_best_circles = num_circles
for idx in np.argsort(accums)[::-1][:number_of_best_circles]:
center_x, center_y = centers[idx]
best_x.append(center_x)
best_y.append(center_y)
best_centers.append(centers[idx])
best_radii.append(radii[idx])
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 6))
ax1.imshow(edges)
ax2.imshow(saas_image.roi_data, cmap=cm.gray)
for center, radius in zip(best_centers, best_radii):
circle = plt.Circle((center[1], center[0]), radius, color='r', fill=False)
ax2.add_patch(circle)
print("Calibrated Center X = %s +/- %s" % (np.average(best_x), np.std(best_x)))
print("Calibrated Center Y = %s +/- %s" % (np.average(best_y), np.std(best_y)))
return np.array([[np.average(best_x), np.std(best_x)],
[np.average(best_y), np.std(best_y)]])
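# Hedged usage sketch for find_center (the file name is hypothetical):
#
#   img = image('saas_frame.fits')
#   img.roi_auto()                                        # crop around the brightest pixel first
#   center = find_center(img, sigma=0.8, num_circles=5)
#   print(center)                                         # [[mean, std], [mean, std]] in ROI pixels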
|
foxsi/SAASpy
|
saaspy/image.py
|
Python
|
mit
| 5,888
|
[
"Gaussian"
] |
6a6ac5dcad78fdddca3563112bb92b74c8aa39ea9d0f99a1d431bd5ed486de32
|
"""
********************************************************************************
* Name: Test Grid Template
* Author: Alan D. Snow
* Created On: September 16, 2016
* License: BSD 3-Clause
********************************************************************************
"""
import itertools
from netCDF4 import Dataset
from numpy import array
from numpy.testing import assert_almost_equal
import os
from osgeo import gdal
from shutil import rmtree
import unittest
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
class TestGridTemplate(unittest.TestCase):
# Define workspace
readDirectory = os.path.join(SCRIPT_DIR, 'grid_standard')
writeDirectory = os.path.join(SCRIPT_DIR, 'out')
def _compare_netcdf_files(self, original, new, ext="nc"):
"""
Compare the contents of two netcdf files
"""
filenameO = '%s.%s' % (original, ext)
filePathO = os.path.join(self.readDirectory, filenameO)
filenameN = '%s.%s' % (new, ext)
filePathN = os.path.join(self.writeDirectory, filenameN)
dO = Dataset(filePathO)
dN = Dataset(filePathN)
assert_almost_equal(dO.variables['time'][:], dN.variables['time'][:], decimal=5)
assert_almost_equal(dO.variables['lon'][:], dN.variables['lon'][:], decimal=5)
assert_almost_equal(dO.variables['lat'][:], dN.variables['lat'][:], decimal=5)
assert_almost_equal(dO.variables['precipitation'][:], dN.variables['precipitation'][:], decimal=5)
assert_almost_equal(dO.variables['pressure'][:], dN.variables['pressure'][:], decimal=4)
assert_almost_equal(dO.variables['relative_humidity'][:], dN.variables['relative_humidity'][:], decimal=4)
assert_almost_equal(dO.variables['wind_speed'][:], dN.variables['wind_speed'][:], decimal=5)
assert_almost_equal(dO.variables['direct_radiation'][:], dN.variables['direct_radiation'][:], decimal=4)
assert_almost_equal(dO.variables['diffusive_radiation'][:], dN.variables['diffusive_radiation'][:], decimal=5)
assert_almost_equal(dO.variables['temperature'][:], dN.variables['temperature'][:], decimal=5)
assert_almost_equal(dO.variables['cloud_cover'][:], dN.variables['cloud_cover'][:], decimal=5)
self.assertEqual(dO.getncattr("proj4"),dN.getncattr("proj4"))
assert_almost_equal(dO.getncattr("geotransform"),dN.getncattr("geotransform"))
dO.close()
dN.close()
def _compare_files(self, original, new, raster=False, precision=7):
"""
Compare the contents of two files
"""
if raster:
dsO = gdal.Open(original)
dsN = gdal.Open(new)
# compare data
rO = array(dsO.ReadAsArray())
rN = array(dsN.ReadAsArray())
assert_almost_equal(rO, rN, decimal=precision)
# compare geotransform
assert_almost_equal(dsO.GetGeoTransform(), dsN.GetGeoTransform(),
decimal=5)
# compare band counts
assert dsO.RasterCount == dsN.RasterCount
# compare nodata
for band_id in range(1, dsO.RasterCount+1):
assert (dsO.GetRasterBand(band_id).GetNoDataValue()
== dsN.GetRasterBand(band_id).GetNoDataValue())
else:
with open(original) as fileO:
contentsO = fileO.read()
linesO = contentsO.strip().split()
with open(new) as fileN:
contentsN = fileN.read()
linesN = contentsN.strip().split()
for lineO, lineN in zip(linesO, linesN):
for valO, valN in zip(lineO.split(), lineN.split()):
try:
valO = float(valO)
valN = float(valN)
assert_almost_equal(valO, valN, precision)
except ValueError:
self.assertEqual(valO, valN)
pass
def _compare_directories(self, dir1, dir2, ignore_file=None, raster=False, precision=7):
"""
Compare the contents of the files of two directories
"""
for afile in os.listdir(dir2):
if not os.path.basename(afile).startswith(".")\
and not afile == ignore_file:
# Compare files with same name
try:
self._compare_files(os.path.join(dir1, afile),
os.path.join(dir2, afile),
raster=raster,
precision=precision)
except AssertionError:
print(os.path.join(dir1, afile))
print(os.path.join(dir2, afile))
raise
def _list_compare(self, listone, listtwo):
for one, two in zip(listone, listtwo):
self.assertEqual(one, two)
def _before_teardown(self):
"""
Method to execute at beginning of tearDown
"""
return
def tearDown(self):
"""
Method to cleanup after tests
"""
self._before_teardown()
# Clear out directory
fileList = os.listdir(self.writeDirectory)
for afile in fileList:
if not afile.endswith('.gitignore'):
path = os.path.join(self.writeDirectory, afile)
if os.path.isdir(path):
rmtree(path)
else:
os.remove(path)
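# Hedged sketch of how a concrete test case is expected to build on this template
# (the class and file base names below are hypothetical):
#
#   class TestGridToGSSHA(TestGridTemplate):
#       def test_netcdf_output(self):
#           # ... run the code under test so it writes <name>.nc into self.writeDirectory ...
#           self._compare_netcdf_files('grid_output', 'grid_output')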
|
CI-WATER/gsshapy
|
tests/template.py
|
Python
|
bsd-3-clause
| 5,573
|
[
"NetCDF"
] |
3b8bad5a41fee98227004debe4c32ef4171c44f80f8a4284e27d80f80512ebb3
|
#!/usr/bin/python
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2010-2013 Francois Beaune, Jupiter Jazz Limited
# Copyright (c) 2014-2016 Francois Beaune, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import print_function
from distutils import archive_util, dir_util
from stat import *
from subprocess import *
from xml.etree.ElementTree import ElementTree
import glob
import os
import platform
import re
import shutil
import subprocess
import sys
import time
import traceback
import zipfile
#--------------------------------------------------------------------------------------------------
# Constants.
#--------------------------------------------------------------------------------------------------
VERSION = "2.4.1"
SETTINGS_FILENAME = "appleseed.package.configuration.xml"
#--------------------------------------------------------------------------------------------------
# Utility functions.
#--------------------------------------------------------------------------------------------------
def info(message):
print(" " + message)
def progress(message):
print(" " + message + "...")
def fatal(message):
print("Fatal: " + message + ". Aborting.")
if sys.exc_info()[0]:
print(traceback.format_exc())
sys.exit(1)
def exe(filepath):
return filepath + ".exe" if os.name == "nt" else filepath
def safe_delete_file(path):
try:
if os.path.exists(path):
os.remove(path)
except OSError:
fatal("Failed to delete file '" + path + "'")
def safe_delete_directory(path):
Attempts = 10
for attempt in range(Attempts):
try:
if os.path.exists(path):
shutil.rmtree(path)
return
except OSError:
if attempt < Attempts - 1:
time.sleep(0.5)
else:
fatal("Failed to delete directory '" + path + "'")
def safe_make_directory(path):
if not os.path.isdir(path):
os.makedirs(path)
def pushd(path):
old_path = os.getcwd()
os.chdir(path)
return old_path
def extract_zip_file(zip_path, output_path):
zf = zipfile.ZipFile(zip_path)
zf.extractall(output_path)
zf.close()
def copy_glob(input_pattern, output_path):
for input_file in glob.glob(input_pattern):
shutil.copy(input_file, output_path)
def make_writable(filepath):
os.chmod(filepath, S_IRUSR | S_IWUSR)
#--------------------------------------------------------------------------------------------------
# Settings.
#--------------------------------------------------------------------------------------------------
class Settings:
def load(self):
print("Loading settings from " + SETTINGS_FILENAME + "...")
tree = ElementTree()
try:
tree.parse(SETTINGS_FILENAME)
except IOError:
fatal("Failed to load configuration file '" + SETTINGS_FILENAME + "'")
self.load_values(tree)
self.print_summary()
def load_values(self, tree):
self.configuration = self.__get_required(tree, "configuration")
self.platform_id = self.__get_required(tree, "platform_id")
self.platform_name = self.__get_required(tree, "platform_name")
self.appleseed_path = self.__get_required(tree, "appleseed_path")
self.headers_path = self.__get_required(tree, "headers_path")
self.qt_runtime_path = self.__get_required(tree, "qt_runtime_path")
self.platform_runtime_path = self.__get_required(tree, "platform_runtime_path")
self.package_output_path = self.__get_required(tree, "package_output_path")
def print_summary(self):
print("")
print(" Configuration: " + self.configuration)
print(" Platform ID: " + self.platform_id + " (Python says " + os.name + ")")
print(" Platform Name: " + self.platform_name)
print(" Path to appleseed: " + self.appleseed_path)
print(" Path to appleseed headers: " + self.headers_path)
print(" Path to Qt runtime: " + self.qt_runtime_path)
if os.name == "nt":
print(" Path to platform runtime: " + self.platform_runtime_path)
print(" Output directory: " + self.package_output_path)
print("")
def __get_required(self, tree, key):
value = tree.findtext(key)
if value is None:
fatal("Missing value \"{0}\" in configuration file".format(key))
return value
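# For reference, a minimal sketch of what appleseed.package.configuration.xml
# might look like, inferred only from the keys read in load_values() above.
# The root element name and every value below are illustrative assumptions,
# not the actual file shipped with appleseed:
#
#   <settings>
#       <configuration>Release</configuration>
#       <platform_id>win64</platform_id>
#       <platform_name>windows64</platform_name>
#       <appleseed_path>C:\dev\appleseed</appleseed_path>
#       <headers_path>C:\dev\appleseed\src\appleseed</headers_path>
#       <qt_runtime_path>C:\Qt\4.8.5\bin</qt_runtime_path>
#       <platform_runtime_path>C:\dev\runtime</platform_runtime_path>
#       <package_output_path>C:\dev\packages</package_output_path>
#   </settings>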
#--------------------------------------------------------------------------------------------------
# Package information.
#--------------------------------------------------------------------------------------------------
class PackageInfo:
def __init__(self, settings):
self.settings = settings
def load(self):
print("Loading package information...")
self.retrieve_git_tag()
self.build_package_path()
self.print_summary()
def retrieve_git_tag(self):
old_path = pushd(self.settings.appleseed_path)
self.version = Popen("git describe --long", stdout=PIPE, shell=True).stdout.read().strip()
os.chdir(old_path)
def build_package_path(self):
package_name = "appleseed-" + self.version + "-" + self.settings.platform_name + ".zip"
self.package_path = os.path.join(self.settings.package_output_path, self.version, package_name)
def print_summary(self):
print("")
print(" Version: " + self.version)
print(" Package path: " + self.package_path)
print("")
#--------------------------------------------------------------------------------------------------
# Base package builder.
#--------------------------------------------------------------------------------------------------
class PackageBuilder:
def __init__(self, settings, package_info):
self.settings = settings
self.package_info = package_info
def build_package(self):
print("Building package:")
print("")
self.orchestrate()
print("")
print("The package was successfully built.")
def orchestrate(self):
self.remove_leftovers()
self.retrieve_sandbox_from_git_repository()
self.deploy_sandbox_to_stage()
self.cleanup_stage()
self.add_local_binaries_to_stage()
self.add_local_libraries_to_stage()
self.add_headers_to_stage()
self.add_scripts_to_stage()
self.add_local_schema_files_to_stage()
self.add_text_files_to_stage()
self.add_dummy_files_into_empty_directories()
self.disable_system_qt_plugins()
self.alter_stage()
self.build_final_zip_file()
self.remove_stage()
def remove_leftovers(self):
progress("Removing leftovers from previous invocations")
safe_delete_directory("appleseed")
safe_delete_file("sandbox.zip")
safe_delete_file(self.package_info.package_path)
def retrieve_sandbox_from_git_repository(self):
progress("Retrieving sandbox from Git repository")
old_path = pushd(os.path.join(self.settings.appleseed_path, "sandbox"))
self.run("git archive --format=zip --output=" + os.path.join(old_path, "sandbox.zip") + " --worktree-attributes HEAD")
os.chdir(old_path)
def deploy_sandbox_to_stage(self):
progress("Deploying sandbox to staging directory")
extract_zip_file("sandbox.zip", "appleseed/")
safe_delete_file("sandbox.zip")
def cleanup_stage(self):
progress("Cleaning up staging directory")
# Remove API reference documentation.
safe_delete_directory("appleseed/documentation/apireference")
# Remove the test suite.
safe_delete_directory("appleseed/tests/test scenes")
# Remove voluminous unit tests/benchmarks data.
safe_delete_file("appleseed/tests/unit benchmarks/inputs/test_knn_particles.bin")
safe_delete_file("appleseed/tests/unit benchmarks/inputs/test_knn_photons.bin")
# Remove the devkit which we ship separately.
safe_delete_directory("appleseed/extras/devkit")
def add_local_binaries_to_stage(self):
progress("Adding local binaries to staging directory")
safe_make_directory("appleseed/bin")
dir_util.copy_tree(os.path.join(self.settings.appleseed_path, "sandbox/bin", self.settings.configuration), "appleseed/bin/")
shutil.copy(os.path.join(self.settings.appleseed_path, "sandbox/bin", exe("maketx")), "appleseed/bin/")
shutil.copy(os.path.join(self.settings.appleseed_path, "sandbox/bin", exe("oslc")), "appleseed/bin/")
shutil.copy(os.path.join(self.settings.appleseed_path, "sandbox/bin", exe("oslinfo")), "appleseed/bin/")
def add_local_libraries_to_stage(self):
progress("Adding local libraries to staging directory")
safe_make_directory("appleseed/lib")
dir_util.copy_tree(os.path.join(self.settings.appleseed_path, "sandbox/lib", self.settings.configuration), "appleseed/lib/")
#
# This method is used by the Mac and Linux package builders.
# It requires the following members to be defined:
#
# self.shared_lib_ext
# self.get_dependencies_for_file()
#
def add_unix_dependencies_to_stage(self):
# Get shared libs needed by binaries.
bin_libs = set()
for dirpath, dirnames, filenames in os.walk("appleseed/bin"):
for filename in filenames:
ext = os.path.splitext(filename)[1]
if ext != ".py" and ext != ".conf":
libs = self.get_dependencies_for_file(os.path.join("appleseed/bin", filename))
bin_libs = bin_libs.union(libs)
# Get shared libs needed by appleseed.python.
for dirpath, dirnames, filenames in os.walk("appleseed/lib"):
appleseedpython_shared_lib = "_appleseedpython" + self.shared_lib_ext
if appleseedpython_shared_lib in filenames:
libs = self.get_dependencies_for_file(os.path.join(dirpath, appleseedpython_shared_lib))
bin_libs = bin_libs.union(libs)
# Get shared libs needed by libraries.
lib_libs = set()
for lib in bin_libs:
libs = self.get_dependencies_for_file(lib)
lib_libs = lib_libs.union(libs)
all_libs = bin_libs.union(lib_libs)
if False:
# Print dependencies.
info(" Dependencies:")
for lib in all_libs:
info(" " + lib)
# Copy needed libs to lib directory.
dest_dir = os.path.join("appleseed", "lib/")
for lib in all_libs:
# The library might already exist, but without writing rights.
lib_name = os.path.basename(lib)
dest_path = os.path.join(dest_dir, lib_name)
if not os.path.exists(dest_path):
progress(" Copying {0} to {1}".format(lib, dest_dir))
shutil.copy(lib, dest_dir)
make_writable(dest_path)
def add_headers_to_stage(self):
progress("Adding headers to staging directory")
# appleseed headers.
safe_make_directory("appleseed/include")
ignore_files = shutil.ignore_patterns("*.cpp", "*.c", "*.xsd", "stdosl.h", "oslutil.h", "snprintf", "version.h.in")
shutil.copytree(os.path.join(self.settings.headers_path, "foundation"), "appleseed/include/foundation", ignore = ignore_files)
shutil.copytree(os.path.join(self.settings.headers_path, "main"), "appleseed/include/main", ignore = ignore_files)
shutil.copytree(os.path.join(self.settings.headers_path, "renderer"), "appleseed/include/renderer", ignore = ignore_files)
# OSL headers.
shutil.copy(os.path.join(self.settings.headers_path, "renderer/kernel/shading/oslutil.h"), "appleseed/shaders/")
shutil.copy(os.path.join(self.settings.headers_path, "renderer/kernel/shading/stdosl.h"), "appleseed/shaders/")
def add_scripts_to_stage(self):
progress("Adding scripts to staging directory")
shutil.copy("convertmany.py", "appleseed/bin/")
shutil.copy("rendermany.py", "appleseed/bin/")
shutil.copy("updatemany.py", "appleseed/bin/")
shutil.copy("rendernode.py", "appleseed/bin/")
shutil.copy("rendermanager.py", "appleseed/bin/")
def add_local_schema_files_to_stage(self):
progress("Adding local schema files to staging directory")
safe_make_directory("appleseed/schemas")
copy_glob(os.path.join(self.settings.appleseed_path, "sandbox/schemas/*.xsd"), "appleseed/schemas/")
def add_text_files_to_stage(self):
progress("Adding LICENSE.txt and README.md files")
shutil.copy(os.path.join(self.settings.appleseed_path, "LICENSE.txt"), "appleseed/")
shutil.copy(os.path.join(self.settings.appleseed_path, "README.md"), "appleseed/")
def add_dummy_files_into_empty_directories(self):
progress("Adding dummy files to preserve empty directories")
for dirpath, dirnames, filenames in os.walk("."):
if len(dirnames) == 0 and len(filenames) == 0:
self.create_preserve_file(dirpath)
def disable_system_qt_plugins(self):
progress("Disabling system's Qt plugins")
with open("appleseed/bin/qt.conf", "w") as f:
pass
def create_preserve_file(self, path):
with open(os.path.join(path, "preserve.txt"), "w") as f:
f.write("This file allows to preserve this otherwise empty directory.\n")
# This method is overridden in the platform-specific builders below.
def alter_stage(self):
return
def build_final_zip_file(self):
progress("Building final zip file from staging directory")
package_base_path = os.path.splitext(self.package_info.package_path)[0]
archive_util.make_zipfile(package_base_path, "appleseed")
def remove_stage(self):
safe_delete_directory("appleseed")
def run(self, cmdline):
info("Running command line: {0}".format(cmdline))
os.system(cmdline)
def run_subprocess(self, cmdline):
p = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
return p.returncode, out, err
#--------------------------------------------------------------------------------------------------
# Windows package builder.
#--------------------------------------------------------------------------------------------------
class WindowsPackageBuilder(PackageBuilder):
def alter_stage(self):
self.add_dependencies_to_stage()
def add_dependencies_to_stage(self):
progress("Windows-specific: Adding dependencies to staging directory")
self.copy_qt_framework("QtCore")
self.copy_qt_framework("QtGui")
copy_glob(os.path.join(self.settings.platform_runtime_path, "*"), "appleseed/bin/")
def copy_qt_framework(self, framework_name):
src_filepath = os.path.join(self.settings.qt_runtime_path, framework_name + "4" + ".dll")
dst_path = os.path.join("appleseed", "bin")
shutil.copy(src_filepath, dst_path)
#--------------------------------------------------------------------------------------------------
# Mac package builder.
#--------------------------------------------------------------------------------------------------
class MacPackageBuilder(PackageBuilder):
def __init__(self, settings, package_info):
PackageBuilder.__init__(self, settings, package_info)
self.build_path = os.path.join(self.settings.appleseed_path, "build", self.settings.platform_id)
self.shared_lib_ext = ".dylib"
self.system_libs_prefixes = ["/System/Library/", "/usr/lib/libcurl", "/usr/lib/libc++",
"/usr/lib/libbz2", "/usr/lib/libSystem", "usr/lib/libz",
"/usr/lib/libncurses"]
def alter_stage(self):
safe_delete_file("appleseed/bin/.DS_Store")
self.add_dependencies_to_stage()
self.fixup_binaries()
self.create_qt_conf_file()
os.rename("appleseed/bin/appleseed.studio", "appleseed/bin/appleseed-studio")
def add_dependencies_to_stage(self):
progress("Mac-specific: Adding dependencies to staging directory")
self.add_unix_dependencies_to_stage()
self.copy_qt_framework("QtCore")
self.copy_qt_framework("QtGui")
self.copy_qt_resources("QtGui")
self.copy_qt_framework("QtOpenGL")
def copy_qt_framework(self, framework_name):
framework_dir = framework_name + ".framework"
src_filepath = os.path.join(self.settings.qt_runtime_path, framework_dir, "Versions", "4", framework_name)
dest_path = os.path.join("appleseed", "lib", framework_dir, "Versions", "4")
safe_make_directory(dest_path)
shutil.copy(src_filepath, dest_path)
make_writable(os.path.join(dest_path, framework_name))
def copy_qt_resources(self, framework_name):
framework_dir = framework_name + ".framework"
src_path = os.path.join(self.settings.qt_runtime_path, framework_dir, "Versions", "4", "Resources")
dest_path = os.path.join("appleseed", "lib", framework_dir, "Resources")
shutil.copytree(src_path, dest_path)
def fixup_binaries(self):
progress("Mac-specific: Fixing up binaries")
self.set_libraries_ids()
self.set_qt_framework_ids()
self.change_library_paths_in_libraries()
self.change_library_paths_in_executables()
self.change_qt_framework_paths_in_qt_frameworks()
def set_libraries_ids(self):
for dirpath, dirnames, filenames in os.walk("appleseed/lib"):
for filename in filenames:
if os.path.splitext(filename)[1] == ".dylib":
lib_path = os.path.join(dirpath, filename)
self.set_library_id(lib_path, filename)
def set_qt_framework_ids(self):
self.set_library_id("appleseed/lib/QtCore.framework/Versions/4/QtCore", "QtCore.framework/Versions/4/QtCore")
self.set_library_id("appleseed/lib/QtGui.framework/Versions/4/QtGui", "QtGui.framework/Versions/4/QtGui")
self.set_library_id("appleseed/lib/QtOpenGL.framework/Versions/4/QtOpenGL", "QtOpenGL.framework/Versions/4/QtOpenGL")
def change_library_paths_in_libraries(self):
for dirpath, dirnames, filenames in os.walk("appleseed/lib"):
for filename in filenames:
if os.path.splitext(filename)[1] == ".dylib":
lib_path = os.path.join(dirpath, filename)
self.change_library_paths_in_binary(lib_path)
self.change_qt_framework_paths_in_binary(lib_path)
def change_library_paths_in_executables(self):
for dirpath, dirnames, filenames in os.walk("appleseed/bin"):
for filename in filenames:
ext = os.path.splitext(filename)[1]
if ext != ".py" and ext != ".conf":
exe_path = os.path.join(dirpath, filename)
self.change_library_paths_in_binary(exe_path)
self.change_qt_framework_paths_in_binary(exe_path)
# Can be used on executables and dynamic libraries.
def change_library_paths_in_binary(self, bin_path):
for lib_path in self.get_dependencies_for_file(bin_path, fix_paths = False):
lib_name = os.path.basename(lib_path)
self.change_library_path(bin_path, lib_path, "@executable_path/../lib/" + lib_name)
# Can be used on executables and dynamic libraries.
def change_qt_framework_paths_in_binary(self, bin_path):
for fwk_path in self.get_qt_frameworks_for_file(bin_path):
fwk_name = re.search(r"(Qt.*)\.framework", fwk_path).group(1)
self.change_library_path(bin_path, fwk_path, "@executable_path/../lib/{0}.framework/Versions/4/{0}".format(fwk_name))
def change_qt_framework_paths_in_qt_frameworks(self):
self.change_qt_framework_paths_in_binary("appleseed/lib/QtCore.framework/Versions/4/QtCore")
self.change_qt_framework_paths_in_binary("appleseed/lib/QtGui.framework/Versions/4/QtGui")
self.change_qt_framework_paths_in_binary("appleseed/lib/QtOpenGL.framework/Versions/4/QtOpenGL")
def set_library_id(self, target, name):
self.run('install_name_tool -id "{0}" {1}'.format(name, target))
def change_library_path(self, target, old, new):
self.run('install_name_tool -change "{0}" "{1}" {2}'.format(old, new, target))
def get_dependencies_for_file(self, filename, fix_paths = True):
returncode, out, err = self.run_subprocess(["otool", "-L", filename])
if returncode != 0:
fatal("Failed to invoke otool(1) to get dependencies for {0}: {1}".format(filename, err))
libs = set()
for line in out.split("\n")[1:]: # skip the first line
line = line.strip()
# Ignore empty lines.
if len(line) == 0:
continue
# Parse the line.
m = re.match(r"(.*) \(compatibility version .*, current version .*\)", line)
if not m:
fatal("Failed to parse line from otool(1) output: " + line)
lib = m.group(1)
# Ignore libs relative to @loader_path.
if "@loader_path" in lib:
continue
# Ignore system libs.
if self.is_system_lib(lib):
continue
# Ignore Qt frameworks.
if re.search(r"Qt.*\.framework", lib):
continue
if fix_paths:
# Optionally search for libraries in other places.
if not os.path.exists(lib):
candidate = os.path.join("/usr/local/lib/", lib)
if os.path.exists(candidate):
lib = candidate
libs.add(lib)
return libs
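    # A typical (illustrative) dependency line of `otool -L` output parsed
    # above looks like:
    #     /usr/local/lib/libboost_system.dylib (compatibility version 1.0.0, current version 1.0.0)
    # The regex keeps everything before " (compatibility version ...)" as the
    # dependency path; the first output line (the file itself) is skipped.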
def get_qt_frameworks_for_file(self, filename, fix_paths = True):
returncode, out, err = self.run_subprocess(["otool", "-L", filename])
if returncode != 0:
fatal("Failed to invoke otool(1) to get dependencies for {0}: {1}".format(filename, err))
libs = set()
for line in out.split("\n")[1:]: # skip the first line
line = line.strip()
# Ignore empty lines.
if len(line) == 0:
continue
# Parse the line.
m = re.match(r"(.*) \(compatibility version .*, current version .*\)", line)
if not m:
fatal("Failed to parse line from otool(1) output: " + line)
lib = m.group(1)
if re.search(r"Qt.*\.framework", lib):
libs.add(lib)
return libs
def is_system_lib(self, lib):
for prefix in self.system_libs_prefixes:
if lib.startswith(prefix):
return True
return False
def create_qt_conf_file(self):
safe_make_directory("appleseed/bin/Contents/Resources")
open("appleseed/bin/Contents/Resources/qt.conf", "w").close()
#--------------------------------------------------------------------------------------------------
# Linux package builder.
#--------------------------------------------------------------------------------------------------
class LinuxPackageBuilder(PackageBuilder):
def __init__(self, settings, package_info):
PackageBuilder.__init__(self, settings, package_info)
self.shared_lib_ext = ".so"
self.system_libs_prefixes = ["linux", "librt", "libpthread", "libGL", "libX", "libselinux",
"libICE", "libSM", "libdl", "libm.so", "libgcc", "libc.so",
"/lib64/ld-linux-", "libstdc++", "libxcb", "libdrm", "libnsl",
"libuuid", "libgthread", "libglib", "libgobject", "libglapi",
"libffi", "libfontconfig", "libutil", "libpython",
"libxshmfence.so"]
def alter_stage(self):
self.make_executable(os.path.join("appleseed/bin", "maketx"))
self.make_executable(os.path.join("appleseed/bin", "oslc"))
self.make_executable(os.path.join("appleseed/bin", "oslinfo"))
self.add_dependencies_to_stage()
self.set_runtime_paths_on_binaries()
self.clear_runtime_paths_on_libraries()
def make_executable(self, filepath):
mode = os.stat(filepath)[ST_MODE]
mode |= S_IXUSR | S_IXGRP | S_IXOTH
os.chmod(filepath, mode)
def add_dependencies_to_stage(self):
progress("Linux-specific: Adding dependencies to staging directory")
self.add_unix_dependencies_to_stage()
def set_runtime_paths_on_binaries(self):
progress("Linux-specific: Setting runtime paths on binaries")
for dirpath, dirnames, filenames in os.walk("appleseed/bin"):
for filename in filenames:
ext = os.path.splitext(filename)[1]
if ext != ".py" and ext != ".conf":
self.run("chrpath -r \$ORIGIN/../lib " + os.path.join("appleseed/bin", filename))
def clear_runtime_paths_on_libraries(self):
progress("Linux-specific: Clearing runtime paths on libraries")
for dirpath, dirnames, filenames in os.walk("appleseed/lib"):
for filename in filenames:
if os.path.splitext(filename)[1] == ".so":
self.run("chrpath -d " + os.path.join(dirpath, filename))
def get_dependencies_for_file(self, filename):
returncode, out, err = self.run_subprocess(["ldd", filename])
if returncode != 0:
fatal("Failed to invoke ldd(1) to get dependencies for {0}: {1}".format(filename, err))
libs = set()
for line in out.split("\n"):
line = line.strip()
# Ignore empty lines.
if len(line) == 0:
continue
# Ignore system libs.
if self.is_system_lib(line):
continue
libs.add(line.split()[2])
return libs
def is_system_lib(self, lib):
for prefix in self.system_libs_prefixes:
if lib.startswith(prefix):
return True
return False
#--------------------------------------------------------------------------------------------------
# Entry point.
#--------------------------------------------------------------------------------------------------
def main():
print("appleseed.package version " + VERSION)
print("")
settings = Settings()
package_info = PackageInfo(settings)
settings.load()
package_info.load()
if os.name == "nt":
package_builder = WindowsPackageBuilder(settings, package_info)
elif os.name == "posix" and platform.mac_ver()[0] != "":
package_builder = MacPackageBuilder(settings, package_info)
elif os.name == "posix" and platform.mac_ver()[0] == "":
package_builder = LinuxPackageBuilder(settings, package_info)
else:
fatal("Unsupported platform: " + os.name)
package_builder.build_package()
if __name__ == '__main__':
main()
|
docwhite/appleseed
|
scripts/appleseed.package.py
|
Python
|
mit
| 28,631
|
[
"VisIt"
] |
e41fa11a90c703269fff21a92ab7d25bd70e7d61398e7ce526de2731af207c41
|
#!/usr/bin/env python
#
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import optparse
import os
import sys
import webbrowser
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir,
'build', 'android'))
from profile_chrome import chrome_startup_controller
from profile_chrome import controllers
from profile_chrome import flags
from profile_chrome import profiler
from profile_chrome import systrace_controller
from profile_chrome import ui
from pylib.device import device_utils
def _CreateOptionParser():
parser = optparse.OptionParser(description='Record about://tracing profiles '
                                 'from Android browser startup, combined with '
'Android systrace. See http://dev.chromium.org'
'/developers/how-tos/trace-event-profiling-'
'tool for detailed instructions for '
'profiling.')
parser.add_option('--url', help='URL to visit on startup. Default: '
'https://www.google.com. An empty URL launches Chrome with'
' a MAIN action instead of VIEW.',
default='https://www.google.com', metavar='URL')
parser.add_option('--cold', help='Flush the OS page cache before starting the'
                    ' browser. Note that this requires a device with root '
'access.', default=False, action='store_true')
parser.add_option_group(flags.SystraceOptions(parser))
parser.add_option_group(flags.OutputOptions(parser))
browsers = sorted(profiler.GetSupportedBrowsers().keys())
parser.add_option('-b', '--browser', help='Select among installed browsers. '
'One of ' + ', '.join(browsers) + ', "stable" is used by '
'default.', type='choice', choices=browsers,
default='stable')
parser.add_option('-v', '--verbose', help='Verbose logging.',
action='store_true')
parser.add_option('-z', '--compress', help='Compress the resulting trace '
'with gzip. ', action='store_true')
parser.add_option('-t', '--time', help='Stops tracing after N seconds, 0 to '
'manually stop (startup trace ends after at most 5s).',
default=5, metavar='N', type='int')
return parser
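# One possible invocation, using only the options defined above (the URL and
# duration are illustrative):
#
#   tools/profile_chrome_startup.py --browser=stable \
#       --url=https://www.google.com --time=10 --verbose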
def main():
parser = _CreateOptionParser()
options, _ = parser.parse_args()
if options.verbose:
logging.getLogger().setLevel(logging.DEBUG)
devices = device_utils.DeviceUtils.HealthyDevices()
if len(devices) != 1:
logging.error('Exactly 1 device must be attached.')
return 1
device = devices[0]
package_info = profiler.GetSupportedBrowsers()[options.browser]
if options.systrace_categories in ['list', 'help']:
ui.PrintMessage('\n'.join(
systrace_controller.SystraceController.GetCategories(device)))
return 0
systrace_categories = (options.systrace_categories.split(',')
if options.systrace_categories else [])
enabled_controllers = []
# Enable the systrace and chrome controller. The systrace controller should go
# first because otherwise the resulting traces miss early systrace data.
if systrace_categories:
enabled_controllers.append(systrace_controller.SystraceController(
device, systrace_categories, False))
enabled_controllers.append(
chrome_startup_controller.ChromeStartupTracingController(
device, package_info, options.cold, options.url))
if options.output:
options.output = os.path.expanduser(options.output)
result = profiler.CaptureProfile(enabled_controllers,
options.time,
output=options.output,
compress=options.compress,
write_json=options.json)
if options.view:
if sys.platform == 'darwin':
os.system('/usr/bin/open %s' % os.path.abspath(result))
else:
webbrowser.open(result)
if __name__ == '__main__':
sys.exit(main())
|
ltilve/ChromiumGStreamerBackend
|
tools/profile_chrome_startup.py
|
Python
|
bsd-3-clause
| 4,249
|
[
"VisIt"
] |
c41ec21c039483bbfd32f198639729fb68b2368d622d0a4ad67b8b098eb4eddc
|
# qmpy/analysis/thermodynamics/space.py
import itertools
from collections import defaultdict
import networkx as nx
import numpy as np
from scipy.spatial import ConvexHull
import matplotlib.pyplot as plt
import logging
from django.db import transaction
import qmpy
from qmpy.utils import *
from . import phase
from .reaction import Reaction
from .equilibrium import Equilibrium
logger = logging.getLogger(__name__)
if qmpy.FOUND_PULP:
import pulp
else:
logger.warn("Cannot import PuLP, cannot do GCLP")
class PhaseSpaceError(Exception):
pass
class Heap(dict):
def add(self, seq):
if len(seq) == 1:
self[seq[0]] = Heap()
return
seq = sorted(seq)
e0 = seq[0]
if e0 in self:
self[e0].add(seq[1:])
else:
self[e0] = Heap()
self[e0].add(seq[1:])
@property
def sequences(self):
seqs = []
for k, v in list(self.items()):
if not v:
seqs.append([k])
else:
for v2 in v.sequences:
seqs.append([k] + v2)
return seqs
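# A brief illustration of how Heap nests sorted sequences under shared leading
# elements (the letters are placeholders, not element symbols):
#
#   >>> h = Heap()
#   >>> h.add(['C', 'A', 'B'])
#   >>> h.add(['D', 'A'])
#   >>> h.sequences
#   [['A', 'B', 'C'], ['A', 'D']]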
class PhaseSpace(object):
"""
A PhaseSpace object represents, naturally, a region of phase space.
The most fundamental property of a PhaseSpace is its bounds,
which are given as a hyphen-delimited list of compositions. These represent
the extent of the phase space, and determine which phases are within the
space.
Next, a PhaseSpace has an attribute, data, which is a PhaseData object,
and is a container for Phase objects, which are used when performing
thermodynamic analysis on this space.
The majority of attributes are lazy, that is, they are only computed when
they are requested, and how to get them (of which there are often several
ways) is decided based on the size and shape of the phase space.
"""
def __init__(self, bounds, mus=None, data=None, **kwargs):
"""
Arguments:
bounds:
Sequence of compositions. Can be comma-delimited ("Fe,Ni,O"),
an actual list (['Fe', 'Ni', 'O']) or any other python
sequence. The compositions need not be elements, if you want to
take a slice through the Fe-Ni-O phase diagram between Fe3O4
and NiO, just do "Fe3O4-NiO".
Keyword Arguments
mus:
define a dictionary of chemical potentials. Will adjust all
calculated formation energies accordingly.
data:
If supplied with a PhaseData instance, it will be used
instead of loading from the OQMD. Can be used to significantly
reduce the amount of time spent querying the database when looping
through many PhaseSpaces.
Examples::
>>> ps = PhaseSpace('Fe-Li-O', load="legacy.dat")
>>> ps2 = PhaseSpace(['Fe','Li','O'], data=ps.data)
>>> ps = PhaseSpace(set(['Li', 'Ni', 'O']))
>>> ps = PhaseSpace('Li2O-Fe2O3')
"""
self.clear_all()
self.set_mus(mus)
self.set_bounds(bounds)
if data is None:
self.data = phase.PhaseData()
if bounds:
self.load(**kwargs)
else:
self.data = data.get_phase_data(self.space)
def __repr__(self):
if self.bounds is None:
return "<unbounded PhaseSpace>"
names = [format_comp(reduce_comp(b)) for b in self.bounds]
bounds = "-".join(names)
if self.mus:
bounds += " " + format_mus(self.mus)
return "<PhaseSpace bound by %s>" % bounds
def __getitem__(self, i):
return self.phases[i]
def __len__(self):
return len(self.phases)
def set_bounds(self, bounds):
bounds = parse_space(bounds)
if bounds is None:
self.bounds = None
return
elements = sorted(set.union(*[set(b.keys()) for b in bounds]))
basis = []
for b in bounds:
basis.append([b.get(k, 0) for k in elements])
self.bounds = bounds
self.basis = np.array(basis)
def infer_formation_energies(self):
mus = {}
for elt in self.space:
if elt in self.phase_dict:
mus[elt] = self.phase_dict[elt].energy
else:
mus[elt] = 0.0
for phase in self.phases:
for elt in self.space:
phase.energy -= phase.unit_comp.get(elt, 0) * mus[elt]
def set_mus(self, mus):
self.mus = {}
if mus is None:
return
elif isinstance(mus, str):
mus = mus.replace(",", " ")
for mu in mus.split():
self.mus.update(parse_mu(mu))
elif isinstance(mus, dict):
self.mus = mus
def load(self, **kwargs):
"""
Loads oqmd data into the associated PhaseData object.
"""
target = kwargs.get("load", "oqmd")
if not target:
return
stable = kwargs.get("stable", False)
fit = kwargs.get("fit", "standard")
total = kwargs.get("total", (fit is None))
if target == "oqmd":
self.data.load_oqmd(self.space, fit=fit, stable=stable, total=total)
elif "legacy" in target:
self.data.load_library("legacy.dat")
elif target == "icsd":
self.data.load_oqmd(
self.space,
fit=fit,
search={"entry__path__contains": "icsd"},
stable=stable,
                total=total,
)
elif target == "prototypes":
self.data.load_oqmd(
space=self.space,
fit=fit,
search={"path__contains": "prototypes"},
stable=stable,
                total=total,
)
        elif target is None:
pass
else:
raise ValueError("Unknown load argument: %s" % target)
def get_subspace(self, space):
data = self.data.get_phase_data(space)
return PhaseSpace(space, data=data)
_phases = None
@property
def phases(self):
if self._phases:
return self._phases
phases = [p for p in self.data.phases if self.in_space(p) and p.use]
self._phases = phases
return self._phases
@phases.setter
def phases(self, phases):
self.clear_all()
self.data = phase.PhaseData()
self.data.phases = phases
_phase_dict = None
@property
def phase_dict(self):
if self._phase_dict:
return self._phase_dict
phase_dict = dict(
[
(k, p)
for k, p in list(self.data.phase_dict.items())
if p.use and self.in_space(p)
]
)
self._phase_dict = phase_dict
return self._phase_dict
@phase_dict.setter
def phase_dict(self, phase_dict):
self.clear_all()
self.data = phase.PhaseData()
self.data.phases = list(phase_dict.values())
def phase_energy(self, p):
dE = sum([self.mus.get(k, 0) * v for k, v in list(p.unit_comp.items())])
N = sum(v for k, v in list(p.unit_comp.items()) if k in self.bound_space)
if N == 0:
N = 1
return (p.energy - dE) / N
def phase_comp(self, p):
comp = dict((k, v) for k, v in list(p.comp.items()) if k in self.bound_elements)
return unit_comp(comp)
def clear_data(self):
"""
Clears all phase data.
"""
self._phases = None
self._phase_dict = None
def clear_analysis(self):
"""
Clears all calculated results.
"""
self._stable = None
self._tie_lines = None
self._hull = None
self._spaces = None
self._dual_spaces = None
self._cliques = None
self._graph = None
def clear_all(self):
"""
Clears input data and analyzed results.
Same as:
>>> PhaseData.clear_data()
>>> PhaseData.clear_analysis()
"""
self.clear_data()
self.clear_analysis()
def load_tie_lines(self):
raise NotImplementedError
@property
def comp_dimension(self):
"""
Compositional dimension of the region of phase space.
Examples::
>>> s = PhaseSpace('Fe-Li-O')
>>> s.comp_dimension
2
>>> s = PhaseSpace('FeO-Ni2O-CoO-Ti3O4')
>>> s.comp_dimension
3
"""
return len(self.bounds) - 1
@property
def chempot_dimension(self):
"""
Chemical potential dimension.
Examples::
>>> s = PhaseSpace('Fe-Li', 'O=-2.5')
>>> s.chempot_dimension
0
>>> s = PhaseSpace('Fe-Li', 'N=0:-5')
>>> s.chempot_dimension
1
>>> s = PhaseSpace('Fe-Li', 'N=0:-5 F=0:-5')
>>> s.chempot_dimension
2
"""
cpdims = [k for k, v in list(self.mus.items()) if isinstance(v, list)]
return len(cpdims)
@property
def shape(self):
"""
(# of compositional dimensions, # of chemical potential dimensions)
The shape attribute of the PhaseSpace determines what type of phase
diagram will be drawn.
Examples::
>>> s = PhaseSpace('Fe-Li', 'O=-1.2')
>>> s.shape
(1, 0)
>>> s = PhaseSpace('Fe-Li', 'O=0:-5')
>>> s.shape
(1, 1)
>>> s = PhaseSpace('Fe-Li-P', 'O=0:-5')
>>> s.shape
        (2, 1)
>>> s = PhaseSpace('Fe', 'O=0:-5')
>>> s.shape
(0, 1)
"""
return (self.comp_dimension, self.chempot_dimension)
@property
def bound_space(self):
"""
Set of elements _of fixed composition_ in the PhaseSpace.
Examples::
>>> s = PhaseSpace('Fe-Li', 'O=-1.4')
>>> s.bound_space
set(['Fe', 'Li'])
"""
if self.bounds is None:
return set()
return set.union(*[set(b.keys()) for b in self.bounds])
@property
def bound_elements(self):
"""
Alphabetically ordered list of elements with constrained composition.
"""
return sorted(self.bound_space)
@property
def space(self):
"""
Set of elements present in the PhaseSpace.
Examples::
>>> s = PhaseSpace('Pb-Te-Se')
>>> s.space
set(['Pb', 'Te', 'Se'])
>>> s = PhaseSpace('PbTe-Na-PbSe')
>>> s.space
set(['Pb', 'Te', 'Na', 'Se'])
"""
return self.bound_space | set(self.mus.keys())
@property
def elements(self):
"""
Alphabetically ordered list of elements present in the PhaseSpace.
"""
return sorted(self.space)
def coord(self, composition, tol=1e-4):
"""Returns the barycentric coordinate of a composition, relative to the
bounds of the PhaseSpace. If the object isn't within the bounds, raises
a PhaseSpaceError.
Examples::
>>> space = PhaseSpace('Fe-Li-O')
>>> space.coord({'Fe':1, 'Li':1, 'O':2})
array([ 0.25, 0.25, 0.5 ])
>>> space = PhaseSpace('Fe2O3-Li2O')
>>> space.coord('Li5FeO4')
array([ 0.25, 0.75])
"""
if isinstance(composition, phase.Phase):
composition = composition.comp
elif isinstance(composition, str):
composition = parse_comp(composition)
composition = defaultdict(float, composition)
if self.bounds is None:
return np.array([composition[k] for k in self.bound_elements])
bcomp = dict(
(k, v) for k, v in list(composition.items()) if k in self.bound_space
)
composition = unit_comp(bcomp)
cvec = np.array([composition.get(k, 0) for k in self.bound_elements])
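        # Solve basis.T x = cvec in the least-squares sense; the solution is
        # the barycentric coordinate of the composition relative to the bounds.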
coord = np.linalg.lstsq(self.basis.T, cvec, rcond=None)[0]
if abs(sum(coord) - 1) > 1e-3 or any(c < -1e-3 for c in coord):
raise PhaseSpaceError
return coord
def comp(self, coord):
"""
Returns the composition of a coordinate in phase space.
Examples::
>>> space = PhaseSpace('Fe-Li-O')
>>> space.comp([0.2, 0.2, 0.6])
{'Fe': 0.2, 'O': 0.6, 'Li': 0.2}
"""
if self.bounds is None:
return defaultdict(float, list(zip(self.elements, coord)))
        if len(coord) != len(self.bounds):
            raise PhaseSpaceError("Dimensions of coordinate must match PhaseSpace")
tot = sum(coord)
coord = [c / float(tot) for c in coord]
comp = defaultdict(float)
for b, x in zip(self.bounds, coord):
for elt, val in list(b.items()):
comp[elt] += val * x
return dict((k, v) for k, v in list(comp.items()) if v > 1e-4)
_spaces = None
@property
def spaces(self):
"""
List of lists of elements, such that every phase in self.phases
is contained in at least one set, and no set is a subset of
any other. This corresponds to the smallest subset of spaces that must
be analyzed to determine the stability of every phase in your dataset.
Examples::
>>> pa, pb, pc = Phase('A', 0), Phase('B', 0), Phase('C', 0)
>>> p1 = Phase('AB2', -1)
>>> p2 = Phase('B3C', -4)
>>> s = PhaseSpace('A-B-C', load=None)
>>> s.phases = [ pa, pb, pc, p1, p2 ]
>>> s.spaces
[['C', 'B'], ['A', 'B']]
"""
if self._spaces:
return self._spaces
spaces = set([frozenset(p.space) for p in list(self.phase_dict.values())])
spaces = [
space for space in spaces if not any([space < space2 for space2 in spaces])
]
self._spaces = list(map(list, spaces))
return self._spaces
def find_stable(self):
stable = set()
for space in self.spaces:
subspace = self.get_subspace(space)
stable |= set(subspace.stable)
self._stable = stable
return stable
_dual_spaces = None
@property
def dual_spaces(self):
"""
List of sets of elements, such that any possible tie-line
between two phases in phases is contained in at least one
set, and no set is a subset of any other.
"""
if self._dual_spaces is None:
# self._dual_spaces = self.get_dual_spaces()
self._dual_spaces = self.heap_structure_spaces()
return self._dual_spaces
def heap_structure_spaces(self):
if len(self.spaces) == 1:
return self.spaces
heap = Heap()
for i, (c1, c2) in enumerate(itertools.combinations(self.spaces, r=2)):
heap.add(set(c1 + c2))
return heap.sequences
def get_dual_spaces(self):
if len(self.spaces) == 1:
return self.spaces
dual_spaces = []
imax = len(self.spaces) ** 2 / 2
spaces = sorted(self.spaces, key=lambda x: -len(x))
for i, (c1, c2) in enumerate(itertools.combinations(spaces, r=2)):
            c3 = frozenset(c1 + c2)
for j, c4 in enumerate(dual_spaces):
if c3 <= c4:
break
elif c4 < c3:
dual_spaces[j] = c3
break
else:
dual_spaces.append(c3)
self._dual_spaces = dual_spaces
return self._dual_spaces
def find_tie_lines(self):
phases = list(self.phase_dict.values())
indict = dict((k, v) for v, k in enumerate(phases))
adjacency = np.zeros((len(indict), len(indict)))
for space in self.dual_spaces:
subspace = self.get_subspace(space)
for p1, p2 in subspace.tie_lines:
i1, i2 = sorted([indict[p1], indict[p2]])
adjacency[i1, i2] = 1
tl = set((phases[i], phases[j]) for i, j in zip(*np.nonzero(adjacency)))
self._tie_lines = tl
return tl
@property
def stable(self):
"""
List of stable phases
"""
if self._stable is None:
self.hull
# self.compute_hull()
return self._stable
@property
def unstable(self):
"""
List of unstable phases.
"""
if self._stable is None:
self.hull
# self.compute_hull()
return [p for p in self.phases if (not p in self.stable) and self.in_space(p)]
_tie_lines = None
@property
def tie_lines(self):
"""
List of length 2 tuples of phases with tie lines between them
"""
if self._tie_lines is None:
self.hull
# self.compute_hull()
return [list(tl) for tl in self._tie_lines]
@property
def tie_lines_list(self):
return list(self.tie_lines)
@property
def hull(self):
"""
List of facets of the convex hull.
"""
if self._hull is None:
self.get_hull()
return list(self._hull)
def get_hull(self):
if any(len(b) > 1 for b in self.bounds):
points = self.get_hull_points()
self.get_qhull(phases=points)
else:
self.get_qhull()
@property
def hull_list(self):
return list(self.hull)
_graph = None
@property
def graph(self):
"""
:mod:`networkx.Graph` representation of the phase space.
"""
if self._graph:
return self._graph
graph = nx.Graph()
graph.add_edges_from(self.tie_lines)
self._graph = graph
return self._graph
_cliques = None
@property
def cliques(self):
"""
Iterator over maximal cliques in the phase space. To get a list of
cliques, use list(PhaseSpace.cliques).
"""
if self._cliques is None:
self.find_cliques()
return self._cliques
def find_cliques(self):
self._cliques = nx.find_cliques(self.graph)
return self._cliques
def cliques_to_hull(self, cliques):
raise NotImplementedError
def stability_range(self, p, element=None):
"""
Calculate the range of phase `p` with respect to `element`.
"""
if element is None and len(self.mus) == 1:
element = list(self.mus.keys())[0]
tcomp = dict(p.unit_comp)
e, c = self.gclp(tcomp, mus=None)
tcomp[element] = tcomp.get(element, 0) + 0.001
edo, xdo = self.gclp(tcomp, mus=None)
tcomp[element] -= 0.001
if element in list(p.comp.keys()):
tcomp[element] -= 0.001
eup, xup = self.gclp(tcomp, mus=None)
return (edo - e) / 0.001, (e - eup) / 0.001
else:
return (edo - e) / 0.001, -20
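    # Roughly speaking, stability_range above uses a +/- 0.001 finite
    # difference on the amount of `element` to estimate the window of that
    # element's chemical potential over which phase `p` stays on the hull
    # (this reading is inferred from the code, not from documentation).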
def chempot_bounds(self, composition, total=False):
energy, phases = self.gclp(composition)
chems = {}
for eq in self.hull_list:
if not phases in eq:
continue
pots = eq.chemical_potentials
if total:
for k in pots:
pots[k] += qmpy.chem_pots["standard"]["elements"][k]
chems[eq] = pots
return chems
def chempot_range(self, p, element=None):
pot_bounds = {}
tcomp = dict(p.unit_comp)
e, c = self.gclp(tcomp, mus=None)
for elt in list(p.comp.keys()):
tcomp = dict(p.unit_comp)
tcomp[elt] -= 0.001
eup, xup = self.gclp(tcomp)
tcomp[elt] += 0.002
edo, xdo = self.gclp(tcomp)
pot_bounds[elt] = [(edo - e) / 0.001, (e - eup) / 0.001]
return pot_bounds
def get_tie_lines_by_gclp(self, iterable=False):
"""
Runs over pairs of Phases and tests for equilibrium by GCLP. Not
recommended, it is very slow.
"""
tie_lines = []
self.get_gclp_stable()
for k1, k2 in itertools.combinations(self.stable, 2):
testpoint = (self.coord(k1.unit_comp) + self.coord(k2.unit_comp)) / 2
energy, phases = self.gclp(self.comp(testpoint))
if abs(energy - (k1.energy + k2.energy) / 2) < 1e-8:
tie_lines.append([k1, k2])
if iterable:
yield [k1, k2]
self._tie_lines = tie_lines
def in_space(self, composition):
"""
Returns True, if the composition is in the right elemental-space
for this PhaseSpace.
Examples::
>>> space = PhaseSpace('Fe-Li-O')
>>> space.in_space('LiNiO2')
False
>>> space.in_space('Fe2O3')
True
"""
if self.bounds is None:
return True
if isinstance(composition, phase.Phase):
composition = composition.comp
elif isinstance(composition, str):
composition = parse_comp(composition)
if set(composition.keys()) <= self.space:
return True
else:
return False
def in_bounds(self, composition):
"""
Returns True, if the composition is within the bounds of the phase space
Examples::
>>> space = PhaseSpace('Fe2O3-NiO2-Li2O')
>>> space.in_bounds('Fe3O4')
False
>>> space.in_bounds('Li5FeO8')
True
"""
if self.bounds is None:
return True
if isinstance(composition, phase.Phase):
composition = composition.unit_comp
elif isinstance(composition, str):
composition = parse_comp(composition)
if not self.in_space(composition):
return False
composition = dict(
(k, v) for k, v in list(composition.items()) if k in self.bound_elements
)
composition = unit_comp(composition)
try:
c = self.coord(composition)
if len(self.bounds) < len(self.space):
comp = self.comp(c)
if set(comp.keys()) != set(composition.keys()) - set(self.mus.keys()):
return False
if not all(
[
abs(comp.get(k, 0) - composition.get(k, 0)) < 1e-3
for k in self.bound_elements
]
):
return False
except PhaseSpaceError:
return False
return True
### analysis stuff
def get_qhull(self, phases=None, mus={}):
"""
Get the convex hull for a given space.
"""
if phases is None: ## ensure there are phases to get the hull of
phases = list(self.phase_dict.values())
## ensure that all phases have negative formation energies
_phases = []
for p in phases:
if not p.use:
continue
if self.phase_energy(p) > 0:
continue
if not self.in_bounds(p):
continue
_phases.append(p)
phases = _phases
phase_space = set()
for p in phases:
phase_space |= p.space
A = []
for p in phases:
A.append(list(self.coord(p))[1:] + [self.phase_energy(p)])
dim = len(A[0])
for i in range(dim):
tmparr = [0 if a != i - 1 else 1 for a in range(dim)]
if not tmparr in A:
A.append(tmparr)
A = np.array(A)
if len(A) == len(A[0]):
self._hull = set([frozenset([p for p in phases])])
self._tie_lines = set(
[frozenset([k1, k2]) for k1, k2 in itertools.combinations(phases, r=2)]
)
self._stable = set([p for p in phases])
return
conv_hull = ConvexHull(A)
hull = set()
tie_lines = set()
stable = set()
for facet in conv_hull.simplices:
### various exclusion rules
if any([ind >= len(phases) for ind in facet]):
continue
if all(phases[ind].energy == 0 for ind in facet if ind < len(phases)):
continue
dim = len(facet)
face_matrix = np.array([A[i] for i in facet])
face_matrix[:, -1] = 1
v = np.linalg.det(face_matrix)
if abs(v) < 1e-8:
continue
face = frozenset([phases[ind] for ind in facet if ind < len(phases)])
stable |= set(face)
tie_lines |= set(
[frozenset([k1, k2]) for k1, k2 in itertools.combinations(face, r=2)]
)
hull.add(Equilibrium(face))
self._hull = hull
self._tie_lines = tie_lines
self._stable = stable
return hull
def get_chempot_qhull(self):
faces = list(self.hull)
A = []
for face in faces:
A.append([face.chem_pots[e] for e in self.elements])
A = np.array(A)
conv_hull = ConvexHull(A)
uhull = set()
for facet in conv_hull.simplices:
face = frozenset([faces[i] for i in facet if i < len(faces)])
uhull.add(face)
return uhull
def get_hull_points(self):
"""
        Gets points from outside the PhaseSpace, e.g. for FeSi2-Li there are
        no other phases within the space itself, but there are combinations of
        Li-Si and Fe-Si phases. This method returns a list of phases that
        includes such composite phases from outside the space.
Examples::
>>> space = PhaseSpace('FeSi2-Li')
>>> space.get_hull_points()
[<Phase FeSi2 (23408): -0.45110217625>,
<Phase Li (104737): 0>,
<Phase 0.680 Li13Si4 + 0.320 FeSi : -0.3370691816>,
<Phase 0.647 Li8Si3 + 0.353 FeSi : -0.355992801765>,
<Phase 0.133 Fe3Si + 0.867 Li21Si5 : -0.239436904167>,
<Phase 0.278 FeSi + 0.722 Li21Si5 : -0.306877209723>]
"""
self._hull = set() # set of lists
self._stable = set() # set
done_list = [] # list of sorted lists
hull_points = [] # list of phases
if len(self.phases) == len(self.space):
            self._hull = set([frozenset(self.phases)])
self._stable = set(self.phases)
return
for b in self.bounds:
e, x = self.gclp(b)
p = phase.Phase.from_phases(x)
hull_points.append(p)
facets = [list(hull_points)]
while facets:
facet = facets.pop(0)
done_list.append(sorted(facet))
try:
phases, E = self.get_minima(list(self.phase_dict.values()), facet)
except:
continue
p = phase.Phase.from_phases(phases)
if p in self.phases:
p = self.phase_dict[p.name]
if not p in hull_points:
hull_points.append(p)
for new_facet in itertools.combinations(facet, r=len(facet) - 1):
new_facet = list(new_facet) + [p]
if new_facet not in done_list:
facets.append(new_facet)
return hull_points
def gclp(self, composition={}, mus={}, phases=[]):
"""
Returns energy, phase composition which is stable at given composition
Examples::
>>> space = PhaseSpace('Fe-Li-O')
>>> energy, phases = space.gclp('FeLiO2')
        >>> print(phases)
        >>> print(energy)
"""
if not composition:
return 0.0, {}
if isinstance(composition, str):
composition = parse_comp(composition)
if not phases:
phases = [p for p in list(self.phase_dict.values()) if p.use]
_mus = self.mus
if mus is None:
_mus = {}
else:
_mus.update(mus)
in_phases = []
space = set(composition.keys()) | set(_mus)
for p in phases:
if p.energy is None:
continue
# if self.in_bounds(p):
if not set(p.comp.keys()) <= space:
continue
in_phases.append(p)
##[vh]
##print "in_phases: ", in_phases
return self._gclp(composition=composition, mus=_mus, phases=in_phases)
def _gclp(self, composition={}, mus={}, phases=[]):
if not qmpy.FOUND_PULP:
raise Exception(
"Cannot do GCLP without installing PuLP and an LP", "solver"
)
prob = pulp.LpProblem("GibbsEnergyMin", pulp.LpMinimize)
phase_vars = pulp.LpVariable.dicts("lib", phases, 0.0)
prob += (
pulp.lpSum(
[
(
p.energy
- sum(
[
p.unit_comp.get(elt, 0) * mu
for elt, mu in list(mus.items())
]
)
)
* phase_vars[p]
for p in phases
]
),
"Free Energy",
)
for elt, constraint in list(composition.items()):
prob += (
pulp.lpSum([p.unit_comp.get(elt, 0) * phase_vars[p] for p in phases])
== float(constraint),
"Conservation of " + elt,
)
##[vh]
##print prob
if pulp.GUROBI().available():
prob.solve(pulp.GUROBI(msg=False))
elif pulp.COIN_CMD().available():
prob.solve(pulp.COIN_CMD())
else:
prob.solve()
phase_comp = dict(
[
(p, phase_vars[p].varValue)
for p in phases
if phase_vars[p].varValue > 1e-5
]
)
energy = sum(p.energy * amt for p, amt in list(phase_comp.items()))
energy -= sum([a * composition.get(e, 0) for e, a in list(mus.items())])
return energy, phase_comp
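    # The linear program assembled in _gclp above is, informally:
    #     minimize    sum_p (E_p - sum_e mu_e * x_{p,e}) * n_p
    #     subject to  sum_p x_{p,e} * n_p == c_e  for every element e,
    #                 n_p >= 0
    # where n_p is the amount of phase p, x_{p,e} its per-atom composition,
    # E_p its energy, mu_e the supplied chemical potentials, and c_e the
    # target composition.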
def get_minima(self, phases, bounds):
"""
Given a set of Phases, get_minima will determine the minimum
free energy elemental composition as a weighted sum of these
compounds
"""
prob = pulp.LpProblem("GibbsEnergyMin", pulp.LpMinimize)
pvars = pulp.LpVariable.dicts("phase", phases, 0)
bvars = pulp.LpVariable.dicts("bound", bounds, 0.0, 1.0)
prob += (
pulp.lpSum(self.phase_energy(p) * pvars[p] for p in phases)
- pulp.lpSum(self.phase_energy(bound) * bvars[bound] for bound in bounds),
"Free Energy",
)
for elt in self.bound_space:
prob += (
sum([p.unit_comp.get(elt, 0) * pvars[p] for p in phases])
== sum([b.unit_comp.get(elt, 0) * bvars[b] for b in bounds]),
"Contraint to the proper range of" + elt,
)
prob += sum([bvars[b] for b in bounds]) == 1, "sum of bounds must be 1"
if pulp.GUROBI().available():
prob.solve(pulp.GUROBI(msg=False))
elif pulp.COIN_CMD().available():
prob.solve(pulp.COIN_CMD())
elif pulp.COINMP_DLL().available():
prob.solve(pulp.COINMP_DLL())
else:
prob.solve()
E = pulp.value(prob.objective)
xsoln = defaultdict(
float,
[(p, pvars[p].varValue) for p in phases if abs(pvars[p].varValue) > 1e-4],
)
return xsoln, E
def compute_hull(self):
phases = [
p
for p in list(self.phase_dict.values())
if (self.phase_energy(p) < 0 and len(p.space) > 1)
]
region = Region([self.phase_dict[elt] for elt in self.space])
region.contained = phases
def compute_stability(self, p):
"""
Compute the energy difference between the formation energy of a Phase,
and the energy of the convex hull in the absence of that phase.
"""
# if self.phase_dict[p.name] != p:
# stable = self.phase_dict[p.name]
# p.stability = p.energy - stable.energy
if len(p.comp) == 1:
stable = self.phase_dict[p.name]
p.stability = p.energy - stable.energy
else:
phases = list(self.phase_dict.values())
# < Mohan
# Add Error Handling for phase.remove(p)
# Old Code:
# phases.remove(p)
# New Code:
try:
phases.remove(p)
except ValueError:
import copy
_ps_dict = copy.deepcopy(self.phase_dict)
_ps_dict.pop(p.name, None)
phases = list(_ps_dict.values())
# Mohan >
energy, gclp_phases = self.gclp(p.unit_comp, phases=phases)
##print p, energy, gclp_phases
# vh
# print p, '------', gclp_phases
p.stability = p.energy - energy
# vh
return energy, gclp_phases
@transaction.atomic
def compute_stabilities(self, phases=None, save=False, reevaluate=True):
"""
Calculate the stability for every Phase.
Keyword Arguments:
phases:
List of Phases. If None, uses every Phase in PhaseSpace.phases
save:
If True, save the value for stability to the database.
            reevaluate:
                If True, clear any previously stored stabilities and recompute
                them for every Phase. True by default.
"""
from qmpy.analysis.vasp.calculation import Calculation
if phases is None:
phases = self.phases
if reevaluate:
for p in self.phases:
p.stability = None
for p in phases:
if p.stability is None:
if p in list(self.phase_dict.values()):
self.compute_stability(p)
else:
p2 = self.phase_dict[p.name]
if p2.stability is None:
self.compute_stability(p2)
base = max(0, p2.stability)
diff = p.energy - p2.energy
p.stability = base + diff
if save:
qs = qmpy.FormationEnergy.objects.filter(id=p.id)
qs.update(stability=p.stability)
def save_tie_lines(self):
"""
Save all tie lines in this PhaseSpace to the OQMD. Stored in
Formation.equilibrium
"""
for p1, p2 in self.tie_lines:
p1.formation.equilibrium.add(p2.formation)
def compute_formation_energies(self):
"""
Evaluates the formation energy of every phase with respect to the
chemical potentials in the PhaseSpace.
"""
ref = []
for b in self.bounds:
if format_comp(b) in self.mus:
                ref.append(self.mus[format_comp(b)])
else:
ref.append(self.gclp(b)[0])
ref = np.array(ref)
for p in self.phases:
p.energy = p.energy - sum(self.coord(p) * ref)
renderer = None
@property
def phase_diagram(self, **kwargs):
"""Renderer of a phase diagram of the PhaseSpace"""
if self.renderer is None:
self.get_phase_diagram(**kwargs)
return self.renderer
@property
def neighboring_equilibria(self):
neighbors = []
for eq1, eq2 in itertools.combinations(self.hull, r=2):
if eq1.adjacency(eq2) == 1:
neighbors.append([eq1, eq2])
return neighbors
def find_reaction_mus(self, element=None):
"""
Find the chemical potentials of a specified element at which reactions
occur.
Examples::
>>> s = PhaseSpace('Fe-Li-O')
>>> s.find_reaction_mus('O')
"""
if element is None and len(self.mus) == 1:
element = list(self.mus.keys())[0]
ps = PhaseSpace("-".join(self.space), data=self.data)
chem_pots = set()
for p in ps.stable:
chem_pots |= set(self.stability_range(p, element))
return sorted(chem_pots)
def chempot_scan(self, element=None, umin=None, umax=None):
"""
Scan through chemical potentials of `element` from `umin` to `umax`
identifing values at which phase transformations occur.
"""
if element is None and len(self.mus) == 1:
element = list(self.mus.keys())[0]
mus = self.find_reaction_mus(element=element)
if umin is None:
umin = min(mus)
if umax is None:
umax = max(mus)
windows = {}
hulls = []
mus = sorted(mus)
for i in range(len(mus)):
mu = mus[i]
if mu < umin or mu > umax:
continue
if i == 0:
nu = mu - 1
window = (None, mu)
elif i == len(mus) - 1:
nu = mu + 1
window = (mu, None)
else:
nu = np.average([mu, mus[i + 1]])
window = (mu, mus[i + 1])
self.mus[element] = nu
self.get_hull()
windows[window] = list(self.stable)
return windows
def get_phase_diagram(self, **kwargs):
"""
Creates a Renderer attribute with appropriate phase diagram components.
Examples::
>>> space = PhaseSpace('Fe-Li-O')
        >>> space.get_phase_diagram()
>>> plt.show()
"""
self.renderer = Renderer()
if self.shape == (0, 0):
self.make_as_unary(**kwargs)
elif self.shape == (1, 0):
self.make_as_binary(**kwargs)
elif self.shape == (2, 0):
self.make_as_ternary(**kwargs)
elif self.shape == (3, 0):
self.make_as_quaternary(**kwargs)
elif self.shape == (0, 1):
self.make_1d_vs_chempot(**kwargs)
elif self.shape == (1, 1):
self.make_vs_chempot(**kwargs)
else:
ps = PhaseSpace("-".join(self.space), data=self.data, load=None)
ps.renderer = Renderer()
ps.make_as_graph(**kwargs)
self.renderer = ps.renderer
def make_as_unary(self, **kwargs):
"""
Plot of phase volume vs formation energy.
Examples::
>>> s = PhaseSpace('Fe2O3')
>>> r = s.make_as_unary()
>>> r.plot_in_matplotlib()
>>> plt.show()
"""
bottom, gclp = self.gclp(self.bounds[0])
bottom /= sum(self.bounds[0].values())
gs = phase.Phase.from_phases(gclp)
points = []
for p in self.phases:
if not self.in_bounds(p):
continue
if not p.calculation:
continue
v = p.calculation.volume_pa
pt = Point([v, self.phase_energy(p) - bottom], label=p.label)
points.append(pt)
# self.renderer.text.append(Text(pt, p.calculation.entry_id))
pc = PointCollection(points, color="red")
self.renderer.add(pc)
pt = Point([gs.volume, 0], label=gs.label, color="green")
self.renderer.add(pt)
xaxis = Axis("x", label="Volume", units="Å<sup>3</sup>/atom")
yaxis = Axis("y", label="Relative Energy", units="eV/atom")
self.renderer.xaxis = xaxis
self.renderer.yaxis = yaxis
self.renderer.options["grid"]["hoverable"] = True
self.renderer.options["tooltip"] = True
def make_1d_vs_chempot(self, **kwargs):
"""
Plot of phase stability vs chemical potential for a single composition.
Examples::
>>> s = PhaseSpace('Fe', mus={'O':[0,-4]})
>>> r = s.make_vs_chempot()
>>> r.plot_in_matplotlib()
>>> plt.show()
"""
self.make_vs_chempot(**kwargs)
self.renderer.xaxis.min = 0.5
self.renderer.xaxis.max = 1.5
self.renderer.xaxis.options["show"] = False
def make_vs_chempot(self, **kwargs):
"""
Plot of phase stability vs chemical potential for a range of
compositions.
Examples::
>>> s = PhaseSpace('Fe-Li', mus={'O':[0,-4]})
>>> r = s.make_vs_chempot()
>>> r.plot_in_matplotlib()
>>> plt.show()
"""
xaxis = Axis("x")
xaxis.min, xaxis.max = (0, 1)
xaxis.label = "-".join([format_comp(b) for b in self.bounds])
elt = list(self.mus.keys())[0]
yaxis = Axis("y", label="Δμ<sub>" + elt + "</sub>", units="eV/atom")
murange = list(self.mus.values())[0]
yaxis.min = min(murange)
yaxis.max = max(murange)
self.renderer.xaxis = xaxis
self.renderer.yaxis = yaxis
if False:
points = []
for window, hull in list(self.chempot_scan().items()):
hull = sorted(hull, key=lambda x: self.coord(x)[0])
for i in range(len(hull) - 1):
p1 = hull[i]
p2 = hull[i + 1]
x1 = self.coord(p1)[0]
x2 = self.coord(p2)[0]
line = Line(
[
Point([x1, window[0], window[1]]),
Point([x2, window[0], window[1]]),
],
fill=True,
)
self.renderer.add(line)
ps = PhaseSpace("-".join(self.space), data=self.data, load=None)
points = set()
lines = []
hlines = set()
for p in ps.stable:
if not self.in_bounds(p):
continue
bot, top = ps.stability_range(p, elt)
x = self.coord(p)[0]
line = Line([Point([x, bot]), Point([x, top])], color="blue")
lines.append(line)
hlines |= set([bot, top])
points.add(Point([x, bot]))
points.add(Point([x, top]))
y = np.average([bot, top])
if y < min(murange):
y = min(murange)
elif y > max(murange):
y = max(murange)
t = Text([x, y], "<b>%s</b>" % p.name)
self.renderer.add(t)
pc = PointCollection(list(points), color="green")
for h in hlines:
self.renderer.add(Line([[0, h], [1, h]], color="grey"))
for l in lines:
self.renderer.add(l)
self.renderer.add(pc)
self.renderer.options["grid"]["hoverable"] = True
def make_as_binary(self, **kwargs):
"""
Construct a binary phase diagram (convex hull) and write it to a
:mod:`~qmpy.Renderer`.
Examples::
>>> s = PhaseSpace('Fe-P')
>>> r = s.make_as_binary()
>>> r.plot_in_matplotlib()
>>> plt.show()
"""
xlabel = "%s<sub>x</sub>%s<sub>1-x</sub>" % (
format_comp(self.bounds[0]),
format_comp(self.bounds[1]),
)
xaxis = Axis("x", label=xlabel)
xaxis.min, xaxis.max = (0, 1)
yaxis = Axis("y", label="Delta H", units="eV/atom")
self.renderer.xaxis = xaxis
self.renderer.yaxis = yaxis
for p1, p2 in self.tie_lines:
pt1 = Point([self.coord(p1)[0], self.phase_energy(p1)])
pt2 = Point([self.coord(p2)[0], self.phase_energy(p2)])
self.renderer.lines.append(Line([pt1, pt2], color="grey"))
points = []
for p in self.unstable:
if not p.use:
continue
if self.phase_energy(p) > 0:
continue
if not self.in_bounds(p):
continue
x = self.coord(p.unit_comp)[0]
pt = Point([x, self.phase_energy(p)], label=p.label)
points.append(pt)
self.renderer.point_collections.append(
PointCollection(points, fill=1, color="red")
)
points = []
for p in self.stable:
if not self.in_bounds(p):
continue
x = self.coord(p.unit_comp)[0]
pt = Point([x, self.phase_energy(p)], label=p.label)
if p.show_label:
self.renderer.text.append(Text(pt, p.name))
points.append(pt)
self.renderer.point_collections.append(
PointCollection(points, fill=True, color="green")
)
self.renderer.options["grid"]["hoverable"] = True
self.renderer.options["tooltip"] = True
self.renderer.options["tooltipOpts"] = {"content": "%label"}
def make_as_ternary(self, **kwargs):
"""
Construct a ternary phase diagram and write it to a
:mod:`~qmpy.Renderer`.
Examples::
>>> s = PhaseSpace('Fe-Li-O')
>>> r = s.make_as_ternary()
>>> r.plot_in_matplotlib()
>>> plt.show()
"""
for p1, p2 in self.tie_lines:
pt1 = Point(coord_to_gtri(self.coord(p1)))
pt2 = Point(coord_to_gtri(self.coord(p2)))
line = Line([pt1, pt2], color="grey")
self.renderer.lines.append(line)
points = []
for p in self.unstable:
if not self.in_bounds(p):
continue
if self.phase_dict[p.name] in self.stable:
continue
##pt = Point(coord_to_gtri(self.coord(p)), label=p.label)
options = {"hull_distance": p.stability}
pt = Point(coord_to_gtri(self.coord(p)), label=p.label, **options)
points.append(pt)
self.renderer.point_collections.append(
PointCollection(points, fill=True, color="red")
)
self.renderer.options["xaxis"]["show"] = False
points = []
for p in self.stable:
if not self.in_bounds(p):
continue
pt = Point(coord_to_gtri(self.coord(p)), label=p.label)
if p.show_label:
self.renderer.add(Text(pt, p.name))
points.append(pt)
self.renderer.point_collections.append(
PointCollection(points, fill=True, color="green")
)
self.renderer.options["grid"]["hoverable"] = (True,)
self.renderer.options["grid"]["borderWidth"] = 0
self.renderer.options["grid"]["margin"] = 4
self.renderer.options["grid"]["show"] = False
self.renderer.options["tooltip"] = True
def make_as_quaternary(self, **kwargs):
"""
Construct a quaternary phase diagram and write it to a
:mod:`~qmpy.Renderer`.
Examples::
>>> s = PhaseSpace('Fe-Li-O-P')
>>> r = s.make_as_quaternary()
>>> r.plot_in_matplotlib()
>>> plt.show()
"""
# plot lines
for p1, p2 in self.tie_lines:
pt1 = Point(coord_to_gtet(self.coord(p1)))
pt2 = Point(coord_to_gtet(self.coord(p2)))
line = Line([pt1, pt2], color="grey")
self.renderer.add(line)
# plot compounds
### < Mohan
# Use phase_dict to collect unstable phases, which will
# return one phase per composition
points = []
for c, p in list(self.phase_dict.items()):
if not self.in_bounds(p):
continue
if p in self.stable:
continue
if p.stability is None:
continue
label = "{}<br> hull distance: {:.3f} eV/atom<br> formation energy: {:.3f} eV/atom".format(
p.name, p.stability, p.energy
)
pt = Point(coord_to_gtet(self.coord(p)), label=label)
points.append(pt)
self.renderer.add(PointCollection(points, color="red", label="Unstable"))
## Older codes:
# for p in self.unstable:
# if not self.in_bounds(p):
# continue
# pt = Point(coord_to_gtet(self.coord(p)), label=p.name)
# points.append(pt)
# self.renderer.add(PointCollection(points,
# color='red', label='Unstable'))
### Mohan >
points = []
for p in self.stable:
if not self.in_bounds(p):
continue
label = "%s:<br>- " % p.name
label += " <br>- ".join(o.name for o in list(self.graph[p].keys()))
pt = Point(coord_to_gtet(self.coord(p)), label=label)
points.append(pt)
if p.show_label:
self.renderer.add(Text(pt, format_html(p.comp)))
self.renderer.add(PointCollection(points, color="green", label="Stable"))
self.renderer.options["grid"]["hoverable"] = (True,)
self.renderer.options["grid"]["borderWidth"] = 0
self.renderer.options["grid"]["show"] = False
self.renderer.options["tooltip"] = True
def make_as_graph(self, **kwargs):
"""
Construct a graph-style visualization of the phase diagram.
"""
G = self.graph
positions = nx.drawing.nx_agraph.pygraphviz_layout(G)
for p1, p2 in self.tie_lines:
pt1 = Point(positions[p1])
pt2 = Point(positions[p2])
line = Line([pt1, pt2], color="grey")
self.renderer.add(line)
points = []
for p in self.stable:
label = "%s:<br>" % p.name
for other in list(G[p].keys()):
label += " -%s<br>" % other.name
pt = Point(positions[p], label=label)
points.append(pt)
if p.show_label:
self.renderer.add(Text(pt, p.name))
pc = PointCollection(points, color="green")
self.renderer.add(pc)
self.renderer.options["grid"]["hoverable"] = True
self.renderer.options["grid"]["borderWidth"] = 0
self.renderer.options["grid"]["show"] = False
self.renderer.options["tooltip"] = True
def stability_window(self, composition, **kwargs):
self.renderer = Renderer()
chem_pots = self.chempot_bounds(composition)
for eq, pots in list(chem_pots.items()):
pt = Point(coord_to_point([pots[k] for k in self.elements]))
self.renderer.add(pt)
def get_reaction(self, var, facet=None):
"""
For a given composition, find the reaction on the given facet that maximizes
the change in that composition. If facet is None, the reaction is computed
over all stable phases of the PhaseSpace.
Examples::
>>> space = PhaseSpace('Fe2O3-Li2O')
>>> equilibria = space.hull[0]
>>> space.get_reaction('Li2O', facet=equilibria)
"""
if isinstance(var, str):
var = parse_comp(var)
if facet:
phases = facet
else:
phases = self.stable
prob = pulp.LpProblem("BalanceReaction", pulp.LpMaximize)
pvars = pulp.LpVariable.dicts("prod", phases, 0)
rvars = pulp.LpVariable.dicts("react", phases, 0)
prob += (
sum([p.fraction(var)["var"] * pvars[p] for p in phases])
- sum([p.fraction(var)["var"] * rvars[p] for p in phases]),
"Maximize delta comp",
)
for celt in self.space:
prob += (
sum([p.fraction(var)[celt] * pvars[p] for p in phases])
== sum([p.fraction(var)[celt] * rvars[p] for p in phases]),
"identical %s composition on both sides" % celt,
)
prob += sum([rvars[p] for p in phases]) == 1
if pulp.GUROBI().available():
prob.solve(pulp.GUROBI(msg=False))
elif pulp.COIN_CMD().available():
prob.solve(pulp.COIN_CMD())
elif pulp.COINMP_DLL().available():
prob.solve(pulp.COINMP_DLL())
else:
prob.solve()
prods = defaultdict(
float, [(c, pvars[c].varValue) for c in phases if pvars[c].varValue > 1e-4]
)
reacts = defaultdict(
float, [(c, rvars[c].varValue) for c in phases if rvars[c].varValue > 1e-4]
)
n_elt = pulp.value(prob.objective)
return reacts, prods, n_elt
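# Interpretation sketch (illustrative, not from the original docs): `reacts` and
# `prods` map phases to their amounts on the reactant and product sides (the
# reactant amounts are constrained to sum to 1), and `n_elt` is the maximized
# change in the amount of `var`. Assuming the hull has already been computed::
#     >>> space = PhaseSpace('Fe2O3-Li2O')
#     >>> reacts, prods, n_elt = space.get_reaction('Li2O')
#     >>> sorted(p.name for p in prods)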
def get_reactions(self, var, electrons=1.0):
"""
Returns a list of Reactions.
Examples::
>>> space = PhaseSpace('Fe-Li-O')
>>> space.get_reactions('Li', electrons=1)
"""
if isinstance(var, str):
var = parse_comp(var)
vname = format_comp(reduce_comp(var))
vphase = self.phase_dict[vname]
vpd = dict((self.phase_dict[k], v) for k, v in list(var.items()))
for facet in self.hull:
reacts, prods, delta_var = self.get_reaction(var, facet=facet)
if vphase in facet:
yield Reaction(
products={vphase: sum(vphase.comp.values())},
reactants={},
delta_var=1.0,
electrons=electrons,
variable=var,
)
continue
elif delta_var < 1e-6:
pass
yield Reaction(
products=prods,
reactants=reacts,
delta_var=delta_var,
variable=var,
electrons=electrons,
)
def plot_reactions(self, var, electrons=1.0, save=False):
"""
Plot the convex hull along the reaction path, as well as the voltage
profile.
"""
if isinstance(var, str):
var = parse_comp(var)
vname = format_comp(var)
fig = plt.figure()
ax1 = fig.add_subplot(211)
# plot tie lines
for p1, p2 in self.tie_lines:
c1 = p1.fraction(var)["var"]
c2 = p2.fraction(var)["var"]
if abs(c1) < 1e-4 or abs(c2) < 1e-4:
if abs(c1 - 1) < 1e-4 or abs(c2 - 1) < 1e-4:
if len(self.tie_lines) > 1:
continue
ax1.plot([c1, c2], [self.phase_energy(p1), self.phase_energy(p2)], "k")
# plot compounds
for p in self.stable:
x = p.fraction(var)["var"]
ax1.plot(x, self.phase_energy(p), "bo")
ax1.text(
x, self.phase_energy(p), "$\\rm{%s}$" % p.latex, ha="left", va="top"
)
plt.ylabel("$\\rm{\Delta H}$ $\\rm{[eV/atom]}$")
ymin, ymax = ax1.get_ylim()
ax1.set_ylim(ymin - 0.1, ymax)
ax2 = fig.add_subplot(212, sharex=ax1)
points = set()
for reaction in self.get_reactions(var, electrons=electrons):
if reaction.delta_var == 0:
continue
voltage = reaction.delta_h / reaction.delta_var / electrons
x1 = reaction.r_var_comp
x2 = reaction.p_var_comp
points |= set([(x1, voltage), (x2, voltage)])
points = sorted(points, key=lambda x: x[0])
points = sorted(points, key=lambda x: -x[1])
#!v
# print points
base = sorted(self.stable, key=lambda x: x.amt(var)["var"])[0]
max_x = max([k[0] for k in points])
if len(points) > 1:
for i in range(len(points) - 2):
ax2.plot(
[points[i][0], points[i + 1][0]],
[points[i][1], points[i + 1][1]],
"k",
)
ax2.plot(
[points[-2][0], points[-2][0]], [points[-2][1], points[-1][1]], "k"
)
ax2.plot([points[-2][0], max_x], [points[-1][1], points[-1][1]], "k")
else:
ax2.plot([0, max_x], [points[0][1], points[0][1]], "k")
plt.xlabel(
"$\\rm{x}$ $\\rm{in}$ $\\rm{(%s)_{x}(%s)_{1-x}}$"
% (format_latex(var), base.latex)
)
plt.ylabel("$\\rm{Voltage}$ $\\rm{[V]}$")
return ax1, ax2
# if not save:
# plt.show()
# else:
# plt.savefig('%s-%s.eps' % (save, vname),
# bbox_inches='tight',
# transparent=True,
# pad_inches=0)
|
wolverton-research-group/qmpy
|
qmpy/analysis/thermodynamics/space.py
|
Python
|
mit
| 57,105
|
[
"VASP"
] |
3da8683b025af4047058fe7ce38d1d0247a57a2fe0d4e262786044bd2737d125
|
# -*- coding: utf-8 -*-
"""
The Starshot module analyses a starshot image made of radiation spokes, whether gantry, collimator, MLC or couch.
It is based on ideas from `Depuydt et al <http://iopscience.iop.org/0031-9155/57/10/2997>`_
and `Gonzalez et al <http://dx.doi.org/10.1118/1.1755491>`_.
Features:
* **Analyze scanned film images, single EPID images, or a set of EPID images** -
Any image that you can load in can be analyzed, including 1 or a set of EPID DICOM images and
films that have been digitally scanned.
* **Any image size** - Have machines with different EPIDs? Scanned your film at different resolutions? No problem.
* **Dose/OD can be inverted** - Whether your device/image views dose as an increase in value or a decrease, pylinac
will detect it and invert if necessary.
* **Automatic noise detection & correction** - Sometimes there's dirt on the scanned film; sometimes there's a dead pixel on the EPID.
Pylinac will detect these spurious noise signals and can avoid or account for them.
* **Accurate, FWHM star line detection** - Pylinac uses not simply the maximum value to find the center of a star line,
but analyzes the entire star profile to determine the center of the FWHM, ensuring small noise or maximum value bias is avoided.
* **Adaptive searching** - If you passed pylinac a set of parameters and a good result wasn't found, pylinac can recover and
do an adaptive search by adjusting parameters to find a "reasonable" wobble.
"""
import copy
import dataclasses
import io
from dataclasses import dataclass
from typing import Union, List, Optional, Tuple, BinaryIO
import argue
import matplotlib.pyplot as plt
import numpy as np
from scipy import optimize
from typing.io import IO
from .core import image, pdf
from .core.geometry import Point, Line, Circle
from .core.io import get_url, TemporaryZipDirectory, retrieve_demo_file
from .core.profile import SingleProfile, CollapsedCircleProfile, Interpolation
from .core.utilities import open_path, ResultBase
from .settings import get_dicom_cmap
@dataclass
class StarshotResults(ResultBase):
"""This class should not be called directly. It is returned by the ``results_data()`` method.
It is a dataclass under the hood and thus comes with all the dunder magic.
Use the following attributes as normal class attributes.
"""
tolerance_mm: float #:
circle_diameter_mm: float #:
circle_radius_mm: float #:
passed: bool #:
circle_center_x_y: Tuple[float, float] #:
class Starshot:
"""Class that can determine the wobble in a "starshot" image, be it gantry, collimator,
couch or MLC. The image can be a scanned film (TIF, JPG, etc) or a sequence of EPID DICOM images.
Attributes
----------
image : :class:`~pylinac.core.image.Image`
circle_profile : :class:`~pylinac.starshot.StarProfile`
lines : :class:`~pylinac.starshot.LineManager`
wobble : :class:`~pylinac.starshot.Wobble`
tolerance : :class:`~pylinac.starshot.Tolerance`
Examples
--------
Run the demo:
>>> Starshot.run_demo()
Typical session:
>>> img_path = r"C:/QA/Starshots/Coll.jpeg"
>>> mystar = Starshot(img_path, dpi=105, sid=1000)
>>> mystar.analyze()
>>> print(mystar.results())
>>> mystar.plot_analyzed_image()
"""
def __init__(self, filepath: Union[str, BinaryIO], **kwargs):
"""
Parameters
----------
filepath
The path to the image file.
kwargs
Passed to :func:`~pylinac.core.image.load`.
"""
self.image = image.load(filepath, **kwargs)
self.wobble = Wobble()
self.tolerance = 1
if self.image.dpmm is None:
raise ValueError("DPI was not a tag in the image nor was it passed in. Please pass a DPI value")
if self.image.sid is None:
raise ValueError("Source-to-Image distance was not an image tag and was not passed in. Please pass an SID value.")
@classmethod
def from_url(cls, url: str, **kwargs):
"""Instantiate from a URL.
Parameters
----------
url : str
URL of the raw file.
kwargs
Passed to :func:`~pylinac.core.image.load`.
"""
filename = get_url(url)
return cls(filename, **kwargs)
@classmethod
def from_demo_image(cls):
"""Construct a Starshot instance and load the demo image."""
demo_file = retrieve_demo_file(url='starshot.tif')
return cls(demo_file, sid=1000)
@classmethod
def from_multiple_images(cls, filepath_list: list, **kwargs):
"""Construct a Starshot instance and load in and combine multiple images.
Parameters
----------
filepath_list : iterable
An iterable of file paths to starshot images that are to be superimposed.
kwargs
Passed to :func:`~pylinac.core.image.load_multiples`.
"""
obj = cls.from_demo_image()
obj.image = image.load_multiples(filepath_list, **kwargs)
return obj
@classmethod
def from_zip(cls, zip_file: str, **kwargs):
"""Construct a Starshot instance from a ZIP archive.
Parameters
----------
zip_file : str
Points to the ZIP archive. Can contain a single or multiple images. If multiple images
the images are combined and thus should be from the same test sequence.
kwargs
Passed to :func:`~pylinac.core.image.load_multiples`.
"""
with TemporaryZipDirectory(zip_file) as tmpdir:
image_files = image.retrieve_image_files(tmpdir)
if not image_files:
raise IndexError(f"No valid starshot images were found in {zip_file}")
if len(image_files) > 1:
return cls.from_multiple_images(image_files, **kwargs)
else:
return cls(image_files[0], **kwargs)
def _get_reasonable_start_point(self) -> Point:
"""Set the algorithm starting point automatically.
Notes
-----
The determination of an automatic start point is accomplished by finding the Full-Width-80%-Max.
Finding the maximum pixel does not consistently work, esp. in the presence of a pin prick. The
FW80M is a more consistent metric for finding a good start point.
"""
# sum the image along each axis within the central 1/3 (avoids outlier influence from say, gantry shots)
top_third = int(self.image.array.shape[0]/3)
bottom_third = int(top_third * 2)
left_third = int(self.image.array.shape[1]/3)
right_third = int(left_third * 2)
central_array = self.image.array[top_third:bottom_third, left_third:right_third]
x_sum = np.sum(central_array, 0)
y_sum = np.sum(central_array, 1)
# Calculate Full-Width, 80% Maximum center
fwxm_x_point = SingleProfile(x_sum, interpolation=Interpolation.NONE).fwxm_data(80)['center index (rounded)'] + left_third
fwxm_y_point = SingleProfile(y_sum, interpolation=Interpolation.NONE).fwxm_data(80)['center index (rounded)'] + top_third
center_point = Point(fwxm_x_point, fwxm_y_point)
return center_point
@argue.bounds(radius=(0.2, 0.95), min_peak_height=(0.05, 0.95))
def analyze(self, radius: float = 0.85, min_peak_height: float = 0.25, tolerance: float = 1.0,
start_point: Optional[Union[Point, tuple]] = None, fwhm: bool = True, recursive: bool = True, invert: bool = False):
"""Analyze the starshot image.
Analyze finds the minimum radius and center of a circle that touches all the lines
(i.e. the wobble circle diameter and wobble center).
Parameters
----------
radius : float, optional
Distance in % between starting point and closest image edge; used to build the circular profile which finds
the radiation lines. Must be between 0.2 and 0.95.
min_peak_height : float, optional
The percentage minimum height a peak must be to be considered a valid peak. A lower value catches
radiation peaks that vary in magnitude (e.g. different MU delivered or gantry shot), but could also pick up noise.
If necessary, lower value for gantry shots and increase for noisy images.
tolerance : int, float, optional
The tolerance in mm to test against for a pass/fail result.
start_point : 2-element iterable, optional
The point where the algorithm should center the circle profile, given as (x-value, y-value).
If None (default), will search for a reasonable maximum point nearest the center of the image.
fwhm : bool
If True (default), the center of the FWHM of the spokes will be determined.
If False, the peak value location is used as the spoke center.
.. note:: In practice, this ends up being a very small difference. Set to false if peak locations are offset or unexpected.
recursive : bool
If True (default), will recursively search for a "reasonable" wobble, meaning the wobble radius is
<3mm. If the wobble found was unreasonable,
the minimum peak height is iteratively adjusted from low to high at the passed radius.
If for all peak heights at the given radius the wobble is still unreasonable, the
radius is then iterated over from most distant inward, iterating over minimum peak heights at each radius.
If False, will simply return the first determined value or raise error if a reasonable wobble could not be determined.
.. warning:: It is strongly recommended to leave this setting at True.
invert : bool
Whether to force invert the image values. This should be set to True if the automatically-determined
pylinac inversion is incorrect.
Raises
------
RuntimeError
If a reasonable wobble value was not found.
"""
self.tolerance = tolerance
self.image.check_inversion_by_histogram(percentiles=[4, 50, 96])
if invert:
self.image.invert()
if start_point is None:
start_point = self._get_reasonable_start_point()
self._get_reasonable_wobble(start_point, fwhm, min_peak_height, radius, recursive)
def _get_reasonable_wobble(self, start_point, fwhm, min_peak_height, radius, recursive):
"""Determine a wobble that is "reasonable". If recursive is false, the first iteration will be passed,
otherwise the parameters will be tweaked to search for a reasonable wobble."""
wobble_unreasonable = True
focus_point = copy.copy(start_point)
peak_gen = get_peak_height()
radius_gen = get_radius()
while wobble_unreasonable:
try:
self.circle_profile = StarProfile(self.image, focus_point, radius, min_peak_height, fwhm)
if (len(self.circle_profile.peaks) < 6) or (len(self.circle_profile.peaks) % 2 != 0):
raise ValueError
self.lines = LineManager(self.circle_profile.peaks)
self._find_wobble_minimize()
except ValueError:
if not recursive:
raise RuntimeError("The algorithm was unable to properly detect the radiation lines. Try setting "
"recursive to True or lower the minimum peak height")
else:
try:
min_peak_height = next(peak_gen)
except StopIteration:
# if no height setting works, change the radius and reset the height
try:
radius = next(radius_gen)
peak_gen = get_peak_height()
except StopIteration:
raise RuntimeError("The algorithm was unable to determine a reasonable wobble. Try setting "
"recursive to False and manually adjusting algorithm parameters")
else: # if no errors are raised
# set the focus point to the wobble minimum
# focus_point = self.wobble.center
# finally:
# stop after first iteration if not recursive
if not recursive:
wobble_unreasonable = False
# otherwise, check if the wobble is reasonable
else:
# if so, stop
if self.wobble.diameter_mm < 2:
focus_near_center = self.wobble.center.distance_to(focus_point) < 5
if focus_near_center:
wobble_unreasonable = False
else:
focus_point = self.wobble.center
# otherwise, iterate through peak height
else:
try:
min_peak_height = next(peak_gen)
except StopIteration:
# if no height setting works, change the radius and reset the height
try:
radius = next(radius_gen)
peak_gen = get_peak_height()
except StopIteration:
raise RuntimeError("The algorithm was unable to determine a reasonable wobble. Try setting "
"recursive to False and manually adjusting algorithm parameters")
def _find_wobble_minimize(self) -> None:
"""Find the minimum distance wobble location and radius to all radiation lines.
The minimum is found using a scipy minimization function.
"""
# starting point
sp = copy.copy(self.circle_profile.center)
def distance(p, lines):
"""Calculate the maximum distance to any line from the given point."""
return max(line.distance_to(Point(p[0], p[1])) for line in lines)
res = optimize.minimize(distance, sp.as_array(), args=(self.lines,), method='Nelder-Mead', options={'fatol': 0.001})
# res = optimize.least_squares(distance, sp.as_array(), args=(self.lines,), ftol=0.001)
self.wobble.radius = res.fun
self.wobble.radius_mm = res.fun / self.image.dpmm
self.wobble.center = Point(res.x[0], res.x[1])
@property
def passed(self) -> bool:
"""Boolean specifying whether the determined wobble was within tolerance."""
return self.wobble.radius_mm * 2 < self.tolerance
@property
def _passfail_str(self) -> str:
"""Return a pass/fail string."""
return 'PASS' if self.passed else 'FAIL'
def results(self) -> str:
"""Return the results of the analysis.
Returns
-------
string
A string with a statement of the minimum circle.
"""
string = (f'\nResult: {self._passfail_str} \n\n' +
f'The minimum circle that touches all the star lines has a diameter of {self.wobble.radius_mm*2:2.3f} mm. \n\n' +
f'The center of the minimum circle is at {self.wobble.center.x:3.1f}, {self.wobble.center.y:3.1f}')
return string
def results_data(self, as_dict: bool = False) -> Union[StarshotResults, dict]:
"""Present the results data and metadata as a dataclass or dict.
The default return type is a dataclass."""
data = StarshotResults(
tolerance_mm=self.tolerance,
circle_diameter_mm=self.wobble.radius_mm * 2,
circle_radius_mm=self.wobble.radius_mm,
circle_center_x_y=(self.wobble.center.x, self.wobble.center.y),
passed=self.passed,
)
if as_dict:
return dataclasses.asdict(data)
return data
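# Illustrative usage (the file name is hypothetical): the dataclass form gives
# attribute access, while as_dict=True is convenient for JSON serialization.
#     >>> star = Starshot("my_starshot.tif", dpi=200, sid=1000)
#     >>> star.analyze()
#     >>> star.results_data().circle_diameter_mm
#     >>> star.results_data(as_dict=True)["passed"]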
def plot_analyzed_image(self, show: bool=True):
"""Draw the star lines, profile circle, and wobble circle on a matplotlib figure.
Parameters
----------
show : bool
Whether to actually show the image.
"""
fig, axes = plt.subplots(ncols=2)
subimages = ('whole', 'wobble')
titles = ('Analyzed Image', 'Wobble Circle')
# show images
for ax, subimage, title in zip(axes, subimages, titles):
self.plot_analyzed_subimage(ax=ax, show=False, subimage=subimage)
ax.set_title(title)
if show:
plt.show()
def plot_analyzed_subimage(self, subimage: str='wobble', ax: Optional[plt.Axes]=None, show: bool=True):
"""Plot a subimage of the starshot analysis. Current options are the zoomed out image and the zoomed in image.
Parameters
----------
subimage : str
If 'wobble', will show a zoomed in plot of the wobble circle.
Any other string will show the zoomed out plot.
ax : None, matplotlib Axes
If None (default), will create a new figure to plot on, otherwise plot to the passed axes.
"""
if ax is None:
fig, ax = plt.subplots()
# show analyzed image
ax.imshow(self.image.array, cmap=get_dicom_cmap())
self.lines.plot(ax)
self.wobble.plot2axes(ax, edgecolor='green')
self.circle_profile.plot2axes(ax, edgecolor='green')
ax.autoscale(tight=True)
ax.axis('off')
# zoom in if wobble plot
if subimage == 'wobble':
xlims = [self.wobble.center.x + self.wobble.diameter, self.wobble.center.x - self.wobble.diameter]
ylims = [self.wobble.center.y + self.wobble.diameter, self.wobble.center.y - self.wobble.diameter]
ax.set_xlim(xlims)
ax.set_ylim(ylims)
ax.axis('on')
if show:
plt.show()
def save_analyzed_image(self, filename: str, **kwargs):
"""Save the analyzed image plot to a file.
Parameters
----------
filename : str, IO stream
The filename to save as. Format is deduced from the string extension, if there is one. E.g. 'mystar.png' will
produce a PNG image.
kwargs
All other kwargs are passed to plt.savefig().
"""
self.plot_analyzed_image(show=False)
plt.savefig(filename, **kwargs)
def save_analyzed_subimage(self, filename: str, subimage: str='wobble', **kwargs):
"""Save the analyzed subimage to a file.
Parameters
----------
filename : str, file-object
Where to save the file to.
subimage : str
If 'wobble', will show a zoomed in plot of the wobble circle.
Any other string will show the zoomed out plot.
kwargs
Passed to matplotlib.
"""
self.plot_analyzed_subimage(subimage=subimage, show=False)
plt.savefig(filename, **kwargs)
def publish_pdf(self, filename: Union[str, IO], notes: Optional[Union[str, List[str]]]=None, open_file: bool=False, metadata: Optional[dict]=None):
"""Publish (print) a PDF containing the analysis, images, and quantitative results.
Parameters
----------
filename : str, file-like object
The file to write the results to.
notes : str, list of strings
Text; if str, prints single line.
If list of strings, each list item is printed on its own line.
open_file : bool
Whether to open the file using the default program after creation.
metadata : dict
Extra data to be passed and shown in the PDF. The key and value will be shown with a colon.
E.g. passing {'Author': 'James', 'Unit': 'TrueBeam'} would result in text in the PDF like:
--------------
Author: James
Unit: TrueBeam
--------------
"""
canvas = pdf.PylinacCanvas(filename, page_title="Starshot Analysis", metadata=metadata)
for img, height in zip(('wobble', 'asdf'), (2, 11.5)):
data = io.BytesIO()
self.save_analyzed_subimage(data, img)
canvas.add_image(data, location=(4, height), dimensions=(13, 13))
text = ['Starshot results:',
f'Source-to-Image Distance (mm): {self.image.sid:2.0f}',
f'Tolerance (mm): {self.tolerance:2.1f}',
f"Minimum circle diameter (mm): {self.wobble.radius_mm*2:2.2f}",
]
canvas.add_text(text=text, location=(10, 25.5), font_size=12)
if notes is not None:
canvas.add_text(text="Notes:", location=(1, 5.5), font_size=14)
canvas.add_text(text=notes, location=(1, 5))
canvas.finish()
if open_file:
open_path(filename)
@staticmethod
def run_demo():
"""Demonstrate the Starshot module using the demo image."""
star = Starshot.from_demo_image()
star.analyze()
print(star.results())
star.plot_analyzed_image()
class Wobble(Circle):
"""A class that holds the wobble information of the Starshot analysis.
Attributes
----------
radius_mm : The radius of the Circle in **mm**.
"""
def __init__(self, center_point=None, radius=None):
super().__init__(center_point=center_point, radius=radius)
self.radius_mm = 0 # The radius of the wobble in mm; as opposed to pixels.
@property
def diameter_mm(self) -> float:
"""Diameter of the wobble in mm."""
return self.radius_mm*2
class LineManager:
"""Manages the radiation lines found."""
def __init__(self, points: List[Point]):
"""
Parameters
----------
points :
The peak points found by the StarProfile
"""
self.lines = []
self.construct_rad_lines(points)
def __getitem__(self, item):
return self.lines[item]
def __len__(self):
return len(self.lines)
def construct_rad_lines(self, points: List[Point]):
"""Find and match the positions of peaks in the circle profile (radiation lines)
and map their positions to the starshot image.
Radiation lines are found by finding the FWHM of the radiation spokes, then matching them
to form lines.
Returns
-------
lines : list
A list of Lines (radiation lines) found.
See Also
--------
Starshot.analyze() : min_peak_height parameter info
core.profile.CircleProfile.find_FWXM_peaks : min_peak_distance parameter info.
geometry.Line : returning object
"""
self.match_points(points)
def match_points(self, points: List[Point]):
"""Match the peaks found to the same radiation lines.
Peaks are matched by connecting the existing peaks based on an offset of peaks. E.g. if there are
12 peaks, there must be 6 radiation lines. Furthermore, assuming star lines go all the way across the CAX,
the 7th peak will be the opposite peak of the 1st peak, forming a line. This method is robust to
starting points far away from the real center.
"""
num_rad_lines = int(len(points) / 2)
offset = num_rad_lines
self.lines = [Line(points[line], points[line + offset]) for line in range(num_rad_lines)]
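# Pairing sketch: with 12 peaks the offset is 6, so peak 0 pairs with peak 6,
# peak 1 with peak 7, and so on. The same idea with bare indices (no image data):
#     >>> peaks = list(range(12))        # stand-ins for peak points
#     >>> offset = len(peaks) // 2
#     >>> [(i, i + offset) for i in range(offset)]
#     [(0, 6), (1, 7), (2, 8), (3, 9), (4, 10), (5, 11)]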
def plot(self, axis: plt.Axes):
"""Plot the lines to the axis."""
for line in self.lines:
line.plot2axes(axis, color='blue')
class StarProfile(CollapsedCircleProfile):
"""Class that holds and analyzes the circular profile which finds the radiation lines."""
def __init__(self, image, start_point, radius, min_peak_height, fwhm):
radius = self._convert_radius_perc2pix(image, start_point, radius)
super().__init__(center=start_point, radius=radius, image_array=image.array, width_ratio=0.1, sampling_ratio=3)
self.get_peaks(min_peak_height, fwhm=fwhm)
@staticmethod
def _convert_radius_perc2pix(image, start_point, radius):
"""Convert a percent radius to distance in pixels, based on the distance from center point to image
edge.
Parameters
----------
radius : float
The radius ratio (e.g. 0.5).
"""
return image.dist2edge_min(start_point) * radius
def _roll_prof_to_midvalley(self) -> int:
"""Roll the circle profile so that its edges are not near a radiation line.
This is a prerequisite for properly finding star lines.
"""
roll_amount = np.where(self.values == self.values.min())[0][0]
self.roll(roll_amount)
return roll_amount
def get_peaks(self, min_peak_height, min_peak_distance=0.02, fwhm=True):
"""Determine the peaks of the profile."""
self._roll_prof_to_midvalley()
self.filter(size=0.003, kind='gaussian')
self.ground()
if fwhm:
self.find_fwxm_peaks(threshold=min_peak_height, min_distance=min_peak_distance)
else:
self.find_peaks(min_peak_height, min_peak_distance)
def get_peak_height():
for height in np.linspace(0.05, 0.95, 10):
yield height
def get_radius():
for radius in np.linspace(0.95, 0.1, 10):
yield radius
|
jrkerns/pylinac
|
pylinac/starshot.py
|
Python
|
mit
| 25,570
|
[
"Gaussian"
] |
7b24cfa63d5896ed639b8b694308fab331416ba778e19d9f263e4af4d3bdd3eb
|
"""
**update**
1. update zeropadding()
2. add automatic zeropadding into xcorr and xcor
3. add infmax and infmin
4. add method keyword in zeropadding, can do corner/center zeropadding.
5. zeropadding can be done by providing axis and new_axis.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import gaussian
from scipy.ndimage.filters import _ni_support
##math tools
def int_out(x):
"""If x is an integer, convert x to int."""
if x.is_integer():
x=int(x)
return x
def roundn(x,n=1,**kwargs):
"""Round x to the nth effective digit."""
out=round(x,-int(np.floor(np.log10(x)))+n-1)
if kwargs.get('int_output')==True:
out=int_out(out)
return out
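# Illustrative examples (not in the original source):
#     >>> roundn(1234, n=2)
#     1200
#     >>> roundn(0.012345, n=2)
#     0.012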
def find_nearest(x,lib=[1,2,5]):
"""round x to the nearest number in lib"""
return lib[np.abs(np.array(lib)-x).argmin()]
def search(x,lib=[1,2,5],period=10):
"""find the nearest number in lib*period**n for all integer n"""
p=int(np.floor(np.log(x)/np.log(period)))
return find_nearest(x*period**(-p),lib)*period**p
def convert_unit(value,unit='mm',new_unit='nm'):
"""value (in unit) -> new_value (in new_unit)"""
unitlib={'km':1000.,'m':1.,'dm':0.1,'cm':0.01,'mm':0.001,'um':10**-6,'nm':10**-9,'$\\AA$':10**-10}
return value*unitlib[unit]/unitlib[new_unit]
def inverse_dict(dic):
return {value:key for key,value in dic.items()}
def auto_unit(value,unit='mm',**kwargs):
"""value, unit -> appropriate new_value, new_unit"""
unitlib={'km':1000.,'m':1.,'dm':0.1,'cm':0.01,'mm':0.001,'um':10**-6,'nm':10**-9,'$\\AA$':10**-10}
ivd=inverse_dict(unitlib)
new_unit=ivd[search(unitlib[unit]*value,lib=[1],period=1000)]
new_value=int_out(float(str(convert_unit(value,unit,new_unit))))
return new_value,new_unit
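# Illustrative example (not in the original source): 1500 mm is re-expressed in
# the most natural unit from unitlib:
#     >>> auto_unit(1500, unit='mm')
#     (1.5, 'm')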
def nanmean(x):
return np.nansum(x)*1./np.sum(np.isfinite(x))
def nan_to_value(x,value='average'):
if value=='average':
value=nanmean(x)
x=np.array(x)
x[np.isnan(x)]=value
return x
def infmax(arr):
x, y = np.where(arr == np.inf)
arr[x, y] = -np.inf
return np.max(arr)
def infmin(arr):
x, y = np.where(arr == np.inf)
arr[x, y] = np.inf
return np.min(arr)
def infnanmax(arr):
x, y = np.where(arr == np.inf)
arr[x, y] = -np.inf
return np.nanmax(arr)
def infnanmin(arr):
x, y = np.where(arr == np.inf)
arr[x, y] = np.inf
return np.nanmin(arr)
def simplify_array(arr):
for x in range(len(arr)):
arr[x] = float(str(arr[x]))
return arr
def gaussian_func(x, sigma):
return np.exp(-0.5*(float(x)/sigma)**2)
def gaussian_kernel1d(sigma, axis=-1, order=0, output=None,
mode="reflect", cval=0.0):
"""One-dimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar
standard deviation for Gaussian kernel
%(axis)s
order : {0, 1, 2, 3}, optional
An order of 0 corresponds to convolution with a Gaussian
kernel. An order of 1, 2, or 3 corresponds to convolution with
the first, second or third derivatives of a Gaussian. Higher
order derivatives are not implemented
%(output)s
%(mode)s
%(cval)s
Returns
-------
gaussian_filter1d : ndarray
"""
if order not in range(4):
raise ValueError('Order outside 0..3 not implemented')
sd = float(sigma)
# make the length of the filter equal to 4 times the standard
# deviations:
lw = int(4.0 * sd + 0.5)
weights = [0.0] * (2 * lw + 1)
weights[lw] = 1.0
sum = 1.0
sd = sd * sd
# calculate the kernel:
for ii in range(1, lw + 1):
tmp = np.exp(-0.5 * float(ii * ii) / sd)
weights[lw + ii] = tmp
weights[lw - ii] = tmp
sum += 2.0 * tmp
for ii in range(2 * lw + 1):
weights[ii] /= sum
# implement first, second and third order derivatives:
if order == 1: # first derivative
weights[lw] = 0.0
for ii in range(1, lw + 1):
x = float(ii)
tmp = -x / sd * weights[lw + ii]
weights[lw + ii] = -tmp
weights[lw - ii] = tmp
elif order == 2: # second derivative
weights[lw] *= -1.0 / sd
for ii in range(1, lw + 1):
x = float(ii)
tmp = (x * x / sd - 1.0) * weights[lw + ii] / sd
weights[lw + ii] = tmp
weights[lw - ii] = tmp
elif order == 3: # third derivative
weights[lw] = 0.0
sd2 = sd * sd
for ii in range(1, lw + 1):
x = float(ii)
tmp = (3.0 - x * x / sd) * x * weights[lw + ii] / sd2
weights[lw + ii] = -tmp
weights[lw - ii] = tmp
return weights
#image tools
def tukey(alpha,N):
w=np.empty(N)
for n in range(N):
if 0<=n<alpha*(N-1)/2.:
w[n]=1/2.*(1+np.cos(np.pi*(2*n/alpha/(N-1.)-1)))
elif alpha*(N-1)/2.<=n<=(N-1)*(1-alpha/2.):
w[n]=1
else:
w[n]=1/2.*(1+np.cos(np.pi*(2*n/alpha/(N-1.)-2./alpha+1)))
return w
def tukey2d(alpha,M,N):
x=tukey(alpha,N)
y=tukey(alpha,M)
X,Y=np.meshgrid(x,y)
return X*Y
def xcorr(x,y,**kwargs):
"""cross correlation by rfft"""
x = np.asarray(x)
y = np.asarray(y)
if np.ndim(x) == np.ndim(y):
shape=kwargs.get('shape',np.max((x.shape, y.shape), axis = 0))
return np.fft.irfftn(np.conjugate(np.fft.rfftn(x,s=shape))*np.fft.rfftn(y,s=shape))
elif np.ndim(y) == 1:
axis = kwargs.get('axis', 0)
shape=kwargs.get('shape', max(x.shape[axis], len(y)))
shape+=shape%2
outshape = np.array(x.shape[:])
outshape[axis] = shape
out = np.zeros(outshape)
y = np.fft.ifftshift(np.pad(y, pad_width = (int((shape-len(y)+1)/2), int((shape-len(y))/2)), mode = 'constant'))
y_fft = np.fft.rfft(y, n=shape)
x_fft = np.fft.rfft(x, n=shape, axis=axis)
if axis == 0:
for ii in range(len(x_fft[0])):
out[:,ii] = np.fft.irfft(x_fft[:,ii]*np.conjugate(y_fft))
else:
for ii in range(len(x_fft)):
out[ii] = np.fft.irfft(x_fft[ii]*np.conjugate(y_fft))
return out
else:
raise ValueError('Only inputs with dimensions of 1 or 2 can be processed.')
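# Illustrative use (assumes two same-shaped 2-D arrays): the argmax of the
# circular cross-correlation recovers an integer roll between two images.
#     >>> a = np.random.rand(32, 32)
#     >>> b = np.roll(a, (3, 5), axis=(0, 1))
#     >>> np.unravel_index(np.argmax(xcorr(a, b)), a.shape)
#     (3, 5)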
def xcor(x,y,**kwargs):
"""cross correlation by fft"""
if np.ndim(x) == 1 and np.ndim(y) == 1:
shape=kwargs.get('shape',max(len(x),len(y)))
return np.fft.ifft(np.conjugate(np.fft.fft(x,n=shape))*np.fft.fft(y,n=shape))
elif np.ndim(x) == 2 and np.ndim(y) == 2:
shape=kwargs.get('shape',[max(len(x),len(y)),max(len(x[0]),len(y[0]))])
return np.fft.ifft2(np.conjugate(np.fft.fft2(x,s=shape))*np.fft.fft2(y,s=shape))
else:
raise ValueError('Only inputs with dimensions of 1 or 2 can be processed.')
def fftfreq2d(m,n=None,d=1,zero='center',axis=0):
x=np.fft.fftfreq(m,d)
if n is None: n=m
y=np.fft.fftfreq(n,d)
X,Y=np.meshgrid(x,y)
if axis==0: result=X
else: result=Y
if zero=='center':
result=np.fft.fftshift(result,1-axis)
return result
def shiftimg_fft(im_fft,shifts,zero='front'):
"""fft of image -> shifted fft of image by x=shifts[0], y=shifts[1]"""
im_fft_shift=im_fft[:]
for n in range(len(shifts)):
im_fft_shift=im_fft_shift*np.exp(1j*fftfreq2d(np.shape(im_fft)[1],np.shape(im_fft)[0],axis=n,zero=zero)*2*np.pi*(-shifts[n]))
return im_fft_shift
def shiftimg(im,shifts,**kwargs):
"""image -> shifted image by x=shifts[0], y=shifts[1]"""
im_fft=np.fft.fft2(im)
im_fft_shift=shiftimg_fft(im_fft,shifts,**kwargs)
return np.real(np.fft.ifft2(im_fft_shift))
def zeropadding(im,new_shape=None,ratio=None, axis=None, new_axis=None, **kwargs):
"""Pad ndarray im with zeros arround the original ndarray.
Shape of padded image indicated in 'new_shape'.
Input the ratio of new image to original (new_shape/np.shape(im)) through ratio is also acceptable."""
actual_axis = kwargs.get('actual_axis')
if ratio or new_shape:
method = kwargs.get('method', 'center')
if ratio:
new_shape=np.round(np.array(np.shape(im))*np.array(ratio)).astype(int)
im_padded=np.zeros(new_shape)
if np.iscomplex(im).any():
im_padded=im_padded*complex(1)
if method == 'center':
# integer division so the results can be used as slice indices
x1 = -(new_shape[1]-len(im[0])+(new_shape[1]-len(im[0]))%2)//2
x2 = x1 + new_shape[1]
y1 = -(new_shape[0]-len(im)+(new_shape[0]-len(im))%2)//2
y2 = y1 + new_shape[0]
else:
x1 = 0
x2 = new_shape[1]
y1 = 0
y2 = new_shape[0]
elif axis and new_axis:
x1=round((new_axis[0]-axis[0])*1./(axis[1]-axis[0])*len(im[0]))
x2=round((new_axis[1]-axis[0])*1./(axis[1]-axis[0])*len(im[0]))
y1=round((new_axis[3]-axis[3])*1./(axis[2]-axis[3])*len(im))
y2=round((new_axis[2]-axis[3])*1./(axis[2]-axis[3])*len(im))
im_padded = np.zeros([y2 - y1, x2 - x1])
else:
raise TypeError("Either 'new_shape' or 'ratio' or ('axis' and 'new_axis') must be given")
try:
im_padded[-y1:-y1+len(im),-x1:-x1+len(im[0])] = im[:]
except (IndexError, ValueError):
raise ValueError('axis should be included in new_axis')
if actual_axis and (axis is not None):
actual_x1=(axis[1]-axis[0])*x1*1./len(im[0])+axis[0]
actual_x2=(axis[1]-axis[0])*x2*1./len(im[0])+axis[0]
actual_y2=(axis[2]-axis[3])*y1*1./len(im)+axis[3]
actual_y1=(axis[2]-axis[3])*y2*1./len(im)+axis[3]
return im_padded, [actual_x1, actual_x2, actual_y1, actual_y2]
else:
return im_padded
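# Illustrative example (not in the original source; relies on integer slice
# indices as fixed above): centre-pad a 2x2 block of ones into a 4x4 array.
#     >>> zeropadding(np.ones((2, 2)), new_shape=(4, 4)).astype(int)
#     array([[0, 0, 0, 0],
#            [0, 1, 1, 0],
#            [0, 1, 1, 0],
#            [0, 0, 0, 0]])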
def crop(image,**kwargs):
new_shape=kwargs.get('new_shape')
scale=kwargs.get('scale')
axis=kwargs.get('axis',[0,len(image[0]),0,len(image)])
new_axis=kwargs.get('new_axis')
slice_pixel = kwargs.get('slice_pixel')
actual_axis = kwargs.get('actual_axis')
if scale or new_shape:
if scale:
new_shape=np.round(np.array(np.shape(image))*np.array(scale)).astype(int)
x1 = int((len(image[0])-new_shape[1])/2)
x2 = x1+new_shape[1]
y1 = int((len(image)-new_shape[0])/2)
y2 = y1+new_shape[0]
elif slice_pixel:
y1, y2, x1, x2 = tuple(slice_pixel)
elif axis and new_axis:
actual_axis = kwargs.get('actual_axis')
x1=round((new_axis[0]-axis[0])*1./(axis[1]-axis[0])*len(image[0]))
x2=round((new_axis[1]-axis[0])*1./(axis[1]-axis[0])*len(image[0]))
y1=round((new_axis[3]-axis[3])*1./(axis[2]-axis[3])*len(image))
y2=round((new_axis[2]-axis[3])*1./(axis[2]-axis[3])*len(image))
else:
raise ValueError("'new_shape' or 'scale' or 'new_axis' must be given")
if actual_axis and (axis is not None):
actual_x1=(axis[1]-axis[0])*x1*1./len(image[0])+axis[0]
actual_x2=(axis[1]-axis[0])*x2*1./len(image[0])+axis[0]
actual_y2=(axis[2]-axis[3])*y1*1./len(image)+axis[3]
actual_y1=(axis[2]-axis[3])*y2*1./len(image)+axis[3]
return image[y1:y2,x1:x2], [actual_x1, actual_x2, actual_y1, actual_y2]
else:
return image[y1:y2,x1:x2]
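# Illustrative example (not in the original source): centre-crop a 4x4 array to 2x2.
#     >>> crop(np.arange(16).reshape(4, 4), new_shape=(2, 2))
#     array([[ 5,  6],
#            [ 9, 10]])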
def fft_resample(image, scale, **kwargs):
"""resample 'image' to 'scale' using fft"""
conserve_min = kwargs.get('conserve_min')
conserve_range = kwargs.get('conserve_range')
rescale_range = kwargs.get('rescale_range')
zoomin_filter = kwargs.get('zoomin_filter')
sigma = kwargs.get('gaussian_sigma', 1./scale)
im_fft=np.fft.fft2(image)
if zoomin_filter != 'gaussian':
im_fft=np.fft.fftshift(im_fft,axes=[0,1])
if scale>1:
im_fft=zeropadding(im_fft,ratio=scale)
elif scale<1:
if zoomin_filter == 'gaussian':
#_x = np.array(gaussian(len(im_fft[0]), sigma), ndmin = 2)
#_y = np.array(gaussian(len(im_fft), sigma), ndmin = 2)
#_gaussian = _x*_y.transpose()
_temp = np.array(gaussian_kernel1d(sigma), ndmin = 2)
_gaussian = _temp.transpose()*_temp
_gaussian = zeropadding(_gaussian, new_shape = im_fft.shape)
_gaussian = np.fft.fft2(np.fft.ifftshift(_gaussian))
im_fft = np.fft.fftshift(im_fft*_gaussian)
im_fft=crop(im_fft,scale=scale)
im_fft=np.fft.ifftshift(im_fft,axes=[0,1])
out = np.real(np.fft.ifft2(im_fft))
if conserve_min:
out -= np.min(out)
if conserve_range:
out *= (np.max(image) - np.min(image))*1./(np.max(out)-np.min(out))
if rescale_range:
out *= scale
return out
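# Illustrative use (not in the original source): Fourier-domain resampling of a
# 2-D image, zero-padding for upsampling and cropping for downsampling.
#     >>> img = np.random.rand(16, 16)
#     >>> fft_resample(img, 2).shape
#     (32, 32)
#     >>> fft_resample(img, 0.5).shape
#     (8, 8)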
def overlay_display(img1, img2, axis1 = None, axis2 = None, translation = None, remove_background = True, normalize = True, **kwargs):
if remove_background:
img1 = img1 - np.min(img1)
img2 = img2 - np.min(img2)
if normalize:
img1 = img1/np.max(img1)
img2 = img2/np.max(img2)
if (axis1 is not None) and (axis2 is not None):
translation = [0, 0]
translation[0] = round((axis2[0]-axis1[0])*1./(axis1[1]-axis1[0])*len(img1[0]))
translation[1] = round((axis2[3]-axis1[3])*1./(axis1[2]-axis1[3])*len(img1))
x_start = min(0, translation[0])
y_start = min(0, translation[1])
x_end = max(np.shape(img1)[1], np.shape(img2)[1] + translation[0])
y_end = max(np.shape(img1)[0], np.shape(img2)[0] + translation[1])
disp = np.zeros([y_end - y_start, x_end - x_start, 3])
disp[-y_start:-y_start+np.shape(img1)[0], -x_start:-x_start+np.shape(img1)[1], 0] = img1[:]
disp[-y_start + translation[1]:-y_start + translation[1] +np.shape(img2)[0], -x_start + translation[0]:-x_start + translation[0]+np.shape(img2)[1], 1] = img2[:]
plt.figure()
plt.imshow(disp, **kwargs)
plt.show()
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
mode="reflect", cval=0.0):
"""One-dimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar
standard deviation for Gaussian kernel
%(axis)s
order : {0, 1, 2, 3}, optional
An order of 0 corresponds to convolution with a Gaussian
kernel. An order of 1, 2, or 3 corresponds to convolution with
the first, second or third derivatives of a Gaussian. Higher
order derivatives are not implemented
%(output)s
%(mode)s
%(cval)s
Returns
-------
gaussian_filter1d : ndarray
"""
#weights = gaussian(sigma)
weights = gaussian_kernel1d(sigma, axis=-1, order=0, output=None,
mode="reflect", cval=0.0)
return xcorr(input, weights, axis=axis)
def gaussian_filter(input, sigma, order=0, output=None,
mode="reflect", cval=0.0):
"""Multidimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
Standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
order : {0, 1, 2, 3} or sequence from same set, optional
The order of the filter along each axis is given as a sequence
of integers, or as a single number. An order of 0 corresponds
to convolution with a Gaussian kernel. An order of 1, 2, or 3
corresponds to convolution with the first, second or third
derivatives of a Gaussian. Higher order derivatives are not
implemented
%(output)s
%(mode)s
%(cval)s
Returns
-------
gaussian_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
The multidimensional filter is implemented as a sequence of
one-dimensional convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
types with a limited precision, the results may be imprecise
because intermediate results may be stored with insufficient
precision.
"""
input = np.asarray(input)
"""
output, return_value = _ni_support._get_output(output, input)
orders = _ni_support._normalize_sequence(order, input.ndim)
if not set(orders).issubset(set(range(4))):
raise ValueError('Order outside 0..4 not implemented')
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
"""
axes = list(range(input.ndim))
if type(sigma) is int:
sigmas = np.ones(len(axes))*sigma
else:
sigmas = sigma[:]
if type(order) is int:
orders = np.ones(len(axes))*order
else:
orders = order[:]
axes = [(axes[ii], sigmas[ii], orders[ii])
for ii in range(len(axes)) if sigmas[ii] > 1e-15]
if len(axes) > 0:
for axis, sigma, order in axes:
output = gaussian_filter1d(input, sigma, axis, order, output,
mode, cval)
input = output
else:
output = np.array(input)  # no filtering requested; return a copy of the input
return output
def zoom(img, zoom):
return fft_resample(img, scale = zoom, zoomin_filter = 'gaussian', conserve_min=True, rescale_range = True)
|
gromitsun/multi-scale-image
|
multiimgs/tools.py
|
Python
|
lgpl-3.0
| 15,334
|
[
"Gaussian"
] |
e06aba296beb3a71b8d79200baa16e461e96401f2da9e06a837d4dc317ee1b33
|
# -*- coding: utf-8 -*-
"""
Objects to represent elements of Euclidean domains that can be expressed as
a + b u, where u is some unit and a and b are integers. Also implements
the Euclidean algorithm to factor those elements.
Gaussian: u = j
Eisenstein: u = exp(2j*pi/3)
Steineisen: u = exp(j*pi/3) (isomorphic to Eisenstein but useful)
(regular) Integers: b = 0, u = -1
See https://en.wikipedia.org/wiki/Euclidean_domain for more info.
"""
import abc
import math
from math import gcd
def smallest_prime_factor(n):
"""The smallest factor of n larger than 1. Will be n if n is prime.
>>> smallest_prime_factor(7)
7
>>> smallest_prime_factor(15)
3
"""
if abs(n) <= 3:
return n
for i in range(2, n + 1):
if n%i == 0:
return i
class EuclideanInteger(metaclass=abc.ABCMeta):
"""Abstract base class for elements in Euclidean domains.
Args:
a: First component of the element.
b: Second component (default: 0)
Class-level attributes:
symbol: Symbol to use for the unit (j, w, etc.)
unit: Numeric form of the unit
mod: Argument for modulus in Euclidean algorithm.
unitmap: A map from units to a nice printable form for them."""
symbol = NotImplemented
unit = NotImplemented
mod = NotImplemented
unitmap = NotImplemented
def __init__(self, a, b=0):
self.a = int(a)
self.b = int(b)
@property
def tuple(self):
"""The components of the element, in tuple form."""
return (self.a, self.b)
def __complex__(self):
return self.a + self.unit * self.b
def __eq__(self, other):
return (self.a == other.a and self.b == other.b
and self.unit == other.unit)
def __lt__(self, other):
return self.anorm() < other.anorm()
def __bool__(self):
return bool(self.a or self.b)
def __hash__(self):
return hash((self.tuple))
def __sub__(self, other):
return type(self)(self.a - other.a, self.b - other.b)
def __neg__(self):
return type(self)(-self.a, -self.b)
def __pos__(self):
return self
def __abs__(self):
return math.sqrt(self.anorm())
@abc.abstractmethod
def __mul__(self, other):
return NotImplemented
@abc.abstractmethod
def __divmod__(self, other):
return NotImplemented, NotImplemented
@abc.abstractmethod
def conjugate(self):
"""Conjugate of the element."""
return NotImplemented
@abc.abstractmethod
def anorm(self):
"""Algebraic norm of the element."""
return NotImplemented
@staticmethod
def _testfactor(p):
"""Test factor for use in factoring algorithm"""
return NotImplemented
def __floordiv__(self, other):
return divmod(self, other)[0]
def __mod__(self, other):
return divmod(self, other)[1]
def __str__(self):
a, b = self.a, self.b
if self.anorm() == 1:
return self._print_unit()
elif b == 0:
return str(a)
elif a == 0:
return '{}'.format(b) + self.symbol
elif b < 0:
return '({} - {}'.format(a, abs(b)) + self.symbol + ')'
else:
return '({} + {}'.format(a, b) + self.symbol + ')'
def _print_unit(self):
return self.unitmap[self.tuple]
def __repr__(self):
return str(self)
def gcd(self, other):
"""Greatest common denominator of this and another element."""
#print(self, other)
if not other:
return self
else:
return other.gcd(self % other)
def _factor(self):
"""Workhorse function for factoring. This is the Euclidean algorithm
that Euclidean domains get their name from."""
constructor = type(self)
an = self.anorm()
p = smallest_prime_factor(an)
if an <= 1 or an == p:
return [self]
elif p%self.mod == self.mod-1:
factor = constructor(p, 0)
left = self//factor
else:
testfactor = self._testfactor(p)
pfactor = constructor(p, 0)
factor = testfactor.gcd(pfactor)
if factor.anorm() == 1:
raise Exception('failed to find factor')
left, rem = divmod(self, factor)
if rem:
factor = factor.conjugate()
left, rem = divmod(self, factor)
if rem:
raise Exception('failed to find divisor')
return [factor] + left._factor()
def factor(self):
"""Factor this element."""
constructor = type(self)
one = constructor(1)
factors = self._factor()
nf = [f.normal_form()[0] for f in factors if f.anorm() > 1]
if nf:
backcalc = nf[-1]
for i in range(len(nf)-1):
f = nf[i]
if f.anorm() > 1:
backcalc *= f
else:
backcalc = one
unit = self//backcalc
if unit != one or not nf:
nf.append(unit)
return sorted(nf)
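# Illustrative example (worked by hand, not from the original source): in the
# Gaussian integers 2 ramifies as a unit times the square of (1 + j):
#     >>> Gaussian(2).factor()
#     [-j, (1 + 1j), (1 + 1j)]
# i.e. 2 == -j * (1 + j)**2.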
def normal_form(self):
"""Returns normal form of this element and how many multiplications
of the unit it takes to get from normal form to this form."""
if (self.a > 0 and self.b >= 0) or (self.a == 0 and self.b == 0):
return self, 0
else:
unit = type(self)(0, 1)
nm = self//unit
nf, n = nm.normal_form()
return nf, 1 + n
class Integer(EuclideanInteger):
"""Class representing regular, run-of-the-mill integers.
Args:
a: The integer.
b: Ignored, only present so the constructor has the same signature
as the other classes that inherit from EuclideanInteger.
See docs for EuclideanInteger for more information.
"""
mod = 0
unit = -1
b = 0
symbol = ''
unitmap = {(1, 0): '1',
(-1, 0): '-1'}
def __init__(self, a, b=0):
super().__init__(a, 0)
def anorm(self):
return abs(self.a)
def conjugate(self):
return self
def __mul__(self, other):
return Integer(self.a*other.a)
def __truediv__(self, other):
return Integer(self.a/other.a)
def __divmod__(self, other):
result = divmod(self.a, other.a)
return Integer(result[0]), Integer(result[1])
def gcd(self, other):
return Integer(gcd(self.a, other.a))
def factor(self):
an = self.anorm()
p = smallest_prime_factor(an)
if an <= 1 or an == p:
return [self]
else:
left = Integer(self.a//p)
return [Integer(p)] + left.factor()
def normal_form(self):
if self.a < 0:
return Integer(-self.a), 1
else:
return self, 0
class Gaussian(EuclideanInteger):
"""Class representing the Gaussian integers, a + bj where j is the
imaginary unit sqrt(-1) and a and b are integers."""
mod = 4
unit = 1j
symbol = 'j'
unitmap = {(1, 0): '1',
(0, 1): 'j',
(-1, 0): '-1',
(0, -1): '-j'}
def anorm(self):
return self.a**2 + self.b**2
def conjugate(self):
a, b = self.a, self.b
return type(self)(a, -b)
def __mul__(self, other):
a, b = self.a, self.b
c, d = other.a, other.b
return type(self)(a*c - b*d, b*c + a*d)
def __divmod__(self, other):
a, b = self.a, self.b
c, d = other.a, other.b
an = other.anorm()
if an == 0:
raise ZeroDivisionError()
x = (a*c+b*d)/an
y = (b*c-a*d)/an
x = math.floor(x) if x > 0 else math.ceil(x)
y = math.floor(y) if y > 0 else math.ceil(y)
div = type(self)(x, y)
mod = self - div*other
return div, mod
@classmethod
def _testfactor(cls, p):
exponent = (p - 1)//2
for n in range(1, p):
ksq = n**exponent + 1
if ksq % p == 0:
k = n**(exponent//2)
break
else:
msg = "couldn't find test factor for {} in the Gaussians".format(p)
raise Exception(msg)
return cls(k, 1)
class Eisenstein(EuclideanInteger):
"""Class representing the Eisenstein integers, a + bw where
w = exp(j*2pi/3) and a and b are integers.
See docs for EuclideanInteger for more information."""
mod = 3
unit = (-1 + 1j*math.sqrt(3))/2
symbol = 'w'
unitmap = {(1, 0): '1',
(1, 1): '-w^2',
(0, 1): 'w',
(-1, 0): '-1',
(-1, -1): 'w^2',
(0, -1): '-w'}
def anorm(self):
a, b = self.a, self.b
return a**2 - a*b + b**2
def conjugate(self):
a, b = self.a, self.b
return Eisenstein(a - b, -b)
def __mul__(self, other):
a, b = self.a, self.b
c, d = other.a, other.b
return Eisenstein(a*c - b*d, b*c + a*d - b*d)
def __divmod__(self, other):
a, b = self.a, self.b
c, d = other.a, other.b
an = other.anorm()
if an == 0:
raise ZeroDivisionError()
x = (a*c-a*d+b*d)/an
y = (b*c-a*d)/an
x = math.floor(x) if x > 0 else math.ceil(x)
y = math.floor(y) if y > 0 else math.ceil(y)
div = Eisenstein(x, y)
mod = self - div*other
return div, mod
def normal_form(self):
if ((self.a > 0 and self.b >= 0 and self.b <= self.a) or
(self.a == 0 and self.b == 0)):
return self, 0
else:
unit = Eisenstein(1, 1)
nm = self//unit
nf, n = nm.normal_form()
return nf, 1 + n
@classmethod
def _testfactor(cls, p):
for k in range(1, p**2):
kf = k**2 - k + 1
if kf % p == 0:
break
else:
msg = "couldn't find test factor for {} in the Eisensteins".format(p)
raise Exception(msg)
return cls(k, 1)
class Steineisen(EuclideanInteger):
"""Class representing the Steineisen integers,
a + bu where u = exp(j*pi/3) and a and b are integers. These are
isomorphic to the Eisenstein integers but this parameterization
is more convenient for use with gcopoly.py.
See docs for EuclideanInteger for more information.
"""
mod = 3
unit = (1 + 1j*math.sqrt(3))/2
symbol = 'u'
unitmap = {(1, 0): '1',
(0, 1): 'u',
(-1, 1): 'u^2',
(-1, 0): '-1',
(0, -1): '-u',
(1, -1): '-u^2'}
def anorm(self):
a, b = self.a, self.b
return a**2 + a*b + b**2
def conjugate(self):
a, b = self.a, self.b
return Steineisen(a + b, -b)
def __mul__(self, other):
a, b = self.a, self.b
c, d = other.a, other.b
return Steineisen(a*c - b*d, b*c + a*d + b*d)
def __divmod__(self, other):
a, b = self.a, self.b
c, d = other.a, other.b
an = other.anorm()
if an == 0:
raise ZeroDivisionError()
x = (a*c + a*d + b*d)/an
y = (b*c - a*d)/an
x = math.floor(x) if x > 0 else math.ceil(x)
y = math.floor(y) if y > 0 else math.ceil(y)
div = Steineisen(x, y)
mod = self - div*other
return div, mod
@classmethod
def _testfactor(cls, p):
for k in range(0, p**2):
kf = k**2 + k + 1
if kf % p == 0:
break
else:
msg = "couldn't find test factor for {} in the Steineisens".format(p)
raise Exception(msg)
return cls(k, 1)
|
brsr/antitile
|
antitile/factor.py
|
Python
|
mit
| 11,837
|
[
"Gaussian"
] |
662b5aff89cd1d8da805b819a1aab200f2e4187870e1c6ccb8b47edd0a336821
|
#!/usr/bin/python
#
# (C) 2013, Markus Wildi
#
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
"""Config provides all required constants with default values. ToDo: This module must be rewritten in future.
"""
__author__ = 'wildi.markus@bluewin.ch'
import ConfigParser
import os
import string
# thanks http://stackoverflow.com/questions/635483/what-is-the-best-way-to-implement-nested-dictionaries-in-python
class AutoVivification(dict):
"""Implementation of perl's autovivification feature."""
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
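# Minimal illustration (not part of the original module): nested keys spring into
# existence on first access, e.g.
#
#   d = AutoVivification()
#   d['filter wheel']['fltw1'] = '[ FILTA, U, nof]'   # no intermediate dict needed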
class DefaultConfiguration(object):
"""Default configuration for rts2saf"""
def __init__(self, debug=False, logger=None):
self.debug=debug
self.logger=logger
self.ccd=None
self.foc=None
self.sexFields=list()
self.config = ConfigParser.RawConfigParser()
self.config.optionxform = str
self.dcf=dict()
self.dcf[('basic', 'BASE_DIRECTORY')]= '/tmp/rts2saf_focus'
self.dcf[('basic', 'TEMP_DIRECTORY')]= '/tmp/'
self.dcf[('basic', 'FILE_GLOB')]= '*fits'
self.dcf[('filter wheels', 'inuse')]= '[ FILTA ]'
self.dcf[('filter wheels', 'EMPTY_SLOT_NAMES')]= [ 'empty8', 'open' ]
# this is really ugly
# but ConfigParser does not allow anything else
# ToDo define more!
self.dcf[('filter wheel', 'fltw1')]= '[ FILTA, U, nof]'
self.dcf[('filter wheel', 'fltw2')]= '[ FILTB, Y ]'
self.dcf[('filter wheel', 'fltw3')]= '[ FILTC, nof ]'
self.dcf[('filter wheel', 'fltw4')]= '[ FILTD, nof ]'
#
# relative lower acquisition limit [tick]
# relative upper acquisition limit [tick]
# stepsize [tick]
# exposure factor
self.dcf[('filter properties', 'flt1')]= '[ U, -1000, 1100, 100, 11.1]'
self.dcf[('filter properties', 'flt2')]= '[ nof1,-1200, 1300, 200, 1.]'
self.dcf[('filter properties', 'flt3')]= '[ nof2,-1200, 1300, 200, 1.]'
self.dcf[('filter properties', 'flt4')]= '[ C, -1400, 1500, 300, 1.]'
self.dcf[('filter properties', 'flt5')]= '[ D, -1400, 1500, 300, 1.]'
self.dcf[('filter properties', 'flt6')]= '[ D, -1400, 1500, 300, 1.]'
self.dcf[('filter properties', 'flt7')]= '[ D, -1400, 1500, 300, 1.]'
self.dcf[('filter properties', 'flt8')]= '[ D, -1400, 1500, 300, 1.]'
self.dcf[('filter properties', 'flt9')]= '[ D, -1400, 1500, 300, 1.]'
self.dcf[('focuser properties', 'FOCUSER_NAME')]= 'F0'
self.dcf[('focuser properties', 'FOCUSER_RESOLUTION')]= 20
self.dcf[('focuser properties', 'FOCUSER_ABSOLUTE_LOWER_LIMIT')]= 0
self.dcf[('focuser properties', 'FOCUSER_ABSOLUTE_UPPER_LIMIT')]= 20
self.dcf[('focuser properties', 'FOCUSER_LOWER_LIMIT')]= 0
self.dcf[('focuser properties', 'FOCUSER_UPPER_LIMIT')]= 20
self.dcf[('focuser properties', 'FOCUSER_STEP_SIZE')]= 2
self.dcf[('focuser properties', 'FOCUSER_SPEED')]= 100.
self.dcf[('focuser properties', 'FOCUSER_NO_FTW_RANGE')]= '[ -100, 100, 20 ]'
self.dcf[('focuser properties', 'FOCUSER_TEMPERATURE_COMPENSATION')]= False
# not yet in use:
self.dcf[('acceptance circle', 'CENTER_OFFSET_X')]= 0.
self.dcf[('acceptance circle', 'CENTER_OFFSET_Y')]= 0.
#
self.dcf[('acceptance circle', 'RADIUS')]= 2000.
#
#
self.dcf[('analysis', 'MINIMUM_OBJECTS')]= 5
self.dcf[('analysis', 'MINIMUM_FOCUSER_POSITIONS')]= 5
# if non empty only FOC_POS within this interval will be analyzed
self.dcf[('analysis', 'FOCUSER_INTERVAL')]= list()
self.dcf[('SExtractor', 'SEXPATH')]= 'sextractor'
self.dcf[('SExtractor', 'SEXCFG')]= '/usr/local/etc/rts2/rts2saf/sex/rts2saf-sex.cfg'
self.dcf[('SExtractor', 'FIELDS')]= ['NUMBER', 'EXT_NUMBER','X_IMAGE','Y_IMAGE','MAG_BEST','FLAGS','CLASS_STAR','FWHM_IMAGE','A_IMAGE','B_IMAGE']
# ToDo, currently put into default sex.fg
# from sextractor config file
# ASSOC_PARAMS 3,4 # columns of xpos,ypos[,mag] # rts2af do not use mag
# ASSOC_RADIUS 10.0 # cross-matching radius (pixels)
# ASSOC_TYPE NEAREST # ASSOCiation method: FIRST, NEAREST, MEAN,
self.dcf[('SExtractor', 'OBJECT_SEPARATION')]= 10.
self.dcf[('SExtractor', 'ELLIPTICITY')]= .1
self.dcf[('SExtractor', 'ELLIPTICITY_REFERENCE')]= .3
self.dcf[('SExtractor', 'DETECT_THRESH')]=1.7
self.dcf[('SExtractor', 'ANALYSIS_THRESH')]=1.7
self.dcf[('SExtractor', 'DEBLEND_MINCONT')]= 0.1
self.dcf[('SExtractor', 'SATUR_LEVEL')]= 65535
self.dcf[('SExtractor', 'STARNNW_NAME')]= '/usr/local/etc/rts2/rts2saf/rts2saf-sex.nnw'
# mapping as found in dummy CCD, used for set
self.dcf[('ccd binning mapping', '1x1')] = 0
self.dcf[('ccd binning mapping', '2x2')] = 1
self.dcf[('ccd binning mapping', '3x3')] = 2
self.dcf[('ccd binning mapping', '4x4')] = 3
self.dcf[('ccd', 'CCD_NAME')]= 'CD'
self.dcf[('ccd', 'CCD_BINNING')]= '1x1'
self.dcf[('ccd', 'WINDOW')]= '[ -1, -1, -1, -1 ]'
self.dcf[('ccd', 'PIXELSIZE')]= 9.e-6 # unit meter
self.dcf[('ccd', 'PIXELSCALE')]= 1.1 # unit arcsec/pixel
self.dcf[('ccd', 'BASE_EXPOSURE')]= .01
self.dcf[('mode', 'SET_FOC_DEF')]= False
self.dcf[('mode', 'WRITE_FILTER_OFFSETS')]= True
# ToDo, make a real alternative
# self.dcf[('mode', 'ANALYZE_FWHM')]= True
self.dcf[('mode', 'ANALYZE_FLUX')]= False
self.dcf[('mode', 'ANALYZE_ASSOC')]= False
self.dcf[('mode', 'ANALYZE_ASSOC_FRACTION')]= 0.65
self.dcf[('mode', 'WITH_MATHPLOTLIB')]= False
self.dcf[('mode', 'WEIGHTED_MEANS')]= False
# mapping of fits header elements to canonical
self.dcf[('fits header mapping', 'AMBIENTTEMPERATURE')]= 'HIERARCH DAVIS.DOME_TMP'
self.dcf[('fits header mapping', 'DATETIME')]= 'JD'
self.dcf[('fits header mapping', 'EXPOSURE')]= 'EXPOSURE'
self.dcf[('fits header mapping', 'CCD_TEMP')]= 'CCD_TEMP'
self.dcf[('fits header mapping', 'FOC_POS')] = 'FOC_POS'
self.dcf[('fits header mapping', 'DATE-OBS')]= 'DATE-OBS'
self.dcf[('fits header mapping', 'BINNING')]= 'BINNING'
self.dcf[('fits header mapping', 'BINNING_X')]= 'BIN_V' # seen BIN_X
self.dcf[('fits header mapping', 'BINNING_Y')]= 'BIN_H' # seen BIN_Y
# These factors are used for fitting
self.dcf[('fits binning mapping', '1x1')]= 1
self.dcf[('fits binning mapping', '2x2')]= 2
self.dcf[('fits binning mapping', '4x4')]= 4
self.dcf[('fits binning mapping', '8x8')]= 8
self.dcf[('telescope', 'TEL_RADIUS')] = 0.09 # [meter]
self.dcf[('telescope', 'TEL_FOCALLENGTH')] = 1.26 # [meter]
self.dcf[('connection', 'URL')] = 'http://127.0.0.1:8889'
self.dcf[('connection', 'RTS2_HTTPD_USERNAME')] = 'rts2saf'
self.dcf[('connection', 'PASSWORD')] = 'set password in your config file'
self.dcf[('queue focus run', 'FWHM_LOWER_THRESH')] = 35.
self.dcf[('analysis', 'FWHM_MIN')] = 1.5
self.dcf[('analysis', 'FWHM_MAX')] = 12.
self.dcf[('IMGP analysis', 'FILTERS_TO_EXCLUDE')] = '[ FILTC:grism1]'
self.dcf[('IMGP analysis', 'SCRIPT_FWHM')] = '/usr/local/bin/rts2saf_fwhm.py'
self.dcf[('IMGP analysis', 'SCRIPT_ASTROMETRY')] = '/usr/local/bin/rts2-astrometry.net'
# or rts2-astrometry.net
def writeDefaultConfiguration(self, cfn='./rts2saf-default.cfg'):
"""Write the default configuration to file, serves as a starting point.
:param cfn: file name
:type string:
:return cfn: file name if success else None
"""
for (section, identifier), value in sorted(self.dcf.iteritems()):
if self.config.has_section(section)== False:
self.config.add_section(section)
self.config.set(section, identifier, value)
try:
with open( cfn, 'w') as configfile:
configfile.write('# 2013-09-10, Markus Wildi\n')
configfile.write('# default configuration for rts2saf\n')
configfile.write('#\n')
configfile.write('#\n')
self.config.write(configfile)
except Exception, e:
self.logger.error('Configuration.writeDefaultConfiguration: config file: {0} could not be written, error: {1}'.format(cfn,e))
return None
return cfn
class Configuration(DefaultConfiguration):
"""Helper class containing the runtime configuration.
"""
# init from base class
def readConfiguration(self, fileName=None):
"""Copy the default configuration and overwrite the values with those from configuration file.
:return: True if success else False
"""
# make the values accessible
self.cfg=AutoVivification()
# TODO
filterWheelsInuse=list()
filterWheelsDefs=dict()
config = ConfigParser.ConfigParser()
config.optionxform = str
if os.path.exists(fileName):
try:
config.readfp(open(fileName))
except Exception, e:
self.logger.error('Configuration.readConfiguration: config file: {0} has wrong syntax, error: {1}'.format(fileName,e))
return False
# ok, I misuse ConfigParser
# check additional elements or typo
for sct in config.sections():
for k,v in config.items(sct):
try:
self.dcf[(sct, k)]
except Exception, e:
self.logger.error('Configuration.readConfiguration: config file: {0} has wrong syntax, error: {1}'.format(fileName,e))
return False
else:
self.logger.error('Configuration.readConfiguration: config file: {0} not found'.format(fileName))
return False
self.cfg['CFGFN'] = fileName
# read the defaults
for (section, identifier), value in self.dcf.iteritems():
#
# ToDO ugly
if section == 'ccd' :
self.cfg[identifier]= value
elif section in 'fits binning mapping' or section in 'ccd binning mapping':
self.cfg[section][identifier]= value
else:
self.cfg[identifier]= value
# over write the defaults
ftds=list()
# if there is no filter wheel defined, a FAKE wheel with one FAKE filter is created
fakeFtw=True
for (section, identifier), value in self.dcf.iteritems():
try:
value = config.get( section, identifier)
except Exception, e:
# exception if section, identifier value are not present in config file
#self.logger.error('Configuration.readConfiguration: config file: {0} has an error at section:{1}, identifier:{2}, value:{3}'.format(fileName, section, identifier, value))
continue
value= string.replace( value, ' ', '')
items=list()
# decode the compound configuration expressions first; the remaining
# plain values are copied over afterwards
if section=='SExtractor':
if identifier in 'FIELDS':
value=value.replace("'", '')
self.cfg['FIELDS']=filter(None,value[1:-1].split(','))
else:
self.cfg[identifier]= value
elif section=='basic':
if isinstance(self.cfg[identifier], bool):
# ToDo, looking for a direct way
if value in 'True':
self.cfg[identifier]= True
else:
self.cfg[identifier]= False
else:
self.cfg[identifier]= value
elif section=='focuser properties':
if identifier in 'FOCUSER_NO_FTW_RANGE':
self.cfg[identifier]=filter(None,value[1:-1].split(','))
else:
self.cfg[identifier]= value
#
elif section=='filter properties':
self.cfg[identifier]= value
ftds.append(value)
#
elif section=='filter wheel':
items= filter(None,value[1:-1].split(','))
filterWheelsDefs[items[0]]=[ x for x in items[1:] if x != '']
#
elif( section=='filter wheels'):
fakeFtw=False
if identifier in 'inuse':
filterWheelsInuse=filter(None,value[1:-1].split(','))
self.cfg[identifier]=filterWheelsInuse
elif identifier in 'EMPTY_SLOT_NAMES':
self.cfg[identifier]=filter(None,value[1:-1].split(','))
#
elif( section == 'ccd' and identifier == 'WINDOW'):
items= filter(None,value[1:-1].split(','))
self.cfg[identifier] = [ int(x) for x in items ]
if len(self.cfg[identifier]) != 4:
self.logger.warn( 'Configuration.readConfiguration: wrong ccd window specification {0} {1}, using the whole CCD area'.format(len(self.cfg[identifier]), self.cfg[identifier]))
self.cfg[identifier] = [ -1, -1, -1, -1]
elif( section=='analysis') and identifier == 'FOCUSER_INTERVAL':
items= filter(None,value[1:-1].split(','))
self.cfg[identifier] = [ int(x) for x in items ]
if len(self.cfg[identifier]) != 2:
self.logger.warn( 'Configuration.readConfiguration: wrong focuser interval specification {0} {1}, using all images'.format(len(self.cfg[identifier]), self.cfg[identifier]))
self.cfg[identifier] = list()
elif( section=='IMGP analysis'):
items= filter(None,value[1:-1].split(','))
if identifier in 'FILTERS_TO_EXCLUDE':
tDict=dict()
for e in filter(None,value[1:-1].split(',')):
k,v=e.split(':')
tDict[v]=k # that's ok !!
self.cfg[identifier]=tDict
else:
self.cfg[identifier]= value
elif( section=='fits binning mapping'):
# exception
self.cfg[section][identifier]= value
elif( section=='ccd binning mapping'):
# exception
self.cfg[section][identifier]= value
# first bool, then int !
elif isinstance(self.cfg[identifier], bool):
# ToDo, looking for a direct way
if value in 'True':
self.cfg[identifier]= True
else:
self.cfg[identifier]= False
elif( isinstance(self.cfg[identifier], int)):
try:
self.cfg[identifier]= int(value)
except Exception, e:
self.logger.error('Configuration.readConfiguration: no int '+ value+ ' in section ' + section + ', identifier ' + identifier + ' in file ' + fileName+ ', error: {0}'.format(e))
elif(isinstance(self.cfg[identifier], float)):
try:
self.cfg[identifier]= float(value)
except Exception, e:
self.logger.error('Configuration.readConfiguration: no float '+ value+ 'in section ' + section + ', identifier ' + identifier + ' in file ' + fileName + ', error: {0}'.format(e))
else:
self.cfg[identifier]= value
# for convenience
# ToDo look!
self.cfg['FAKE'] = fakeFtw
if self.cfg['FAKE']:
self.cfg['FILTER DEFINITIONS'] = ['FAKE_FT']
self.cfg['FILTER WHEEL DEFINITIONS'] = {'FAKE_FTW': [ 'FAKE_FT'] }
self.cfg['FILTER WHEELS INUSE'] = [ 'FAKE_FTW' ]
else:
self.cfg['FILTER DEFINITIONS'] = ftds
self.cfg['FILTER WHEEL DEFINITIONS'] = filterWheelsDefs
self.cfg['FILTER WHEELS INUSE'] = filterWheelsInuse
self.cfg['FITS_BINNING_MAPPING'] = self.cfg['fits binning mapping']
self.cfg['CCD_BINNING_MAPPING'] = self.cfg['ccd binning mapping']
return True
def writeConfiguration(self, cfn='./rts2saf-my-new.cfg'):
for (section, identifier), value in sorted(self.dcf.iteritems()):
print section, '=>', identifier, '=>', value
if self.config.has_section(section)== False:
self.config.add_section(section)
self.config.set(section, identifier, value)
with open( cfn, 'w') as configfile:
configfile.write('# 2013-09-10, Markus Wildi\n')
configfile.write('# default configuration for rts2saf\n')
configfile.write('#\n')
configfile.write('#\n')
self.config.write(configfile)
def checkConfiguration(self, args=None):
"""Check the runtime configuration e.g. if SExtractor is present or if the filter wheel definitions and filters are consistent.
:return: True if success else False
"""
# rts2.sextractor catches the file-not-found error and falls back to internal defaults, so check for the files explicitly here
if not os.path.exists(self.cfg['SEXPATH']):
self.logger.warn( 'Configuration.checkConfiguration: sextractor path: {0} not valid, returning'.format(self.cfg['SEXPATH']))
return False
if not os.path.exists(self.cfg['SEXCFG']):
self.logger.warn( 'Configuration.checkConfiguration: SExtractor config file: {0} not found, returning'.format(self.cfg['SEXCFG']))
return False
if not os.path.exists(self.cfg['STARNNW_NAME']):
self.logger.warn( 'Configuration.checkConfiguration: SExtractor NNW config file: {0} not found, returning'.format(self.cfg['STARNNW_NAME']))
return False
if not self.cfg['FIELDS']:
self.logger.warn( 'Configuration.checkConfiguration: no sextractor fields defined, returning')
return False
ftws = self.cfg['FILTER WHEEL DEFINITIONS'].keys()
fts=list()
for x in self.cfg['FILTER DEFINITIONS']:
ele= filter(None,x.strip('[]').split(','))
fts.append(ele[0])
for ftw in self.cfg['FILTER WHEELS INUSE']:
if ftw not in ftws:
self.logger.warn( 'Configuration.checkConfiguration: filter wheel: {} not defined in: {}'.format(ftw, ftws))
return False
for ftName in self.cfg['FILTER WHEEL DEFINITIONS'][ftw]:
if ftName not in fts:
self.logger.warn( 'Configuration.checkConfiguration: filter: {} not defined in: {}'.format(ftName, self.cfg['FILTER DEFINITIONS']))
return False
try:
vars(args)['associate']
if not 'NUMBER' in self.cfg['FIELDS']:
self.logger.error( 'Configuration.checkConfiguration: with --associate specify SExtractor parameter NUMBER in FIELDS: {}'.format( self.cfg['FIELDS']))
return False
except:
pass
try:
vars(args)['flux']
for fld in ['FLUX_MAX' , 'FLUX_APER', 'FLUXERR_APER']:
if fld in self.cfg['FIELDS']:
self.logger.error( 'Configuration.checkConfiguration: with --flux do not specify SExtractor parameter: {} in FIELDS: {}'.format( fld, self.cfg['FIELDS']))
return False
except:
pass
return True
# more to come
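# --- Hedged usage sketch (illustration only, paths are examples) ---
#
#   import logging
#   lg = logging.getLogger('rts2saf')
#   rt = Configuration(logger=lg)
#   rt.writeDefaultConfiguration(cfn='./rts2saf-default.cfg')
#   if rt.readConfiguration(fileName='./rts2saf-default.cfg') and rt.checkConfiguration():
#       print rt.cfg['BASE_DIRECTORY']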
|
RTS2/rts2
|
scripts/rts2saf/rts2saf/config.py
|
Python
|
lgpl-3.0
| 21,229
|
[
"VisIt"
] |
bf944c6f23aeac06af78e49109e1b14e1d7fec1f686c9deb909afd56ba0740f3
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Helper for adding automatically tracked values to Tensorboard.
Autosummary creates an identity op that internally keeps track of the input
values and automatically shows up in TensorBoard. The reported value
represents an average over input components. The average is accumulated
constantly over time and flushed when save_summaries() is called.
Notes:
- The output tensor must be used as an input for something else in the
graph. Otherwise, the autosummary op will not get executed, and the average
value will not get accumulated.
- It is perfectly fine to include autosummaries with the same name in
several places throughout the graph, even if they are executed concurrently.
- It is ok to also pass in a python scalar or numpy array. In this case, it
is added to the average immediately.
"""
from collections import OrderedDict
import numpy as np
import tensorflow as tf
from tensorboard import summary as summary_lib
from tensorboard.plugins.custom_scalar import layout_pb2
from . import tfutil
from .tfutil import TfExpression
from .tfutil import TfExpressionEx
_dtype = tf.float64
_vars = OrderedDict() # name => [var, ...]
_immediate = OrderedDict() # name => update_op, update_value
_finalized = False
_merge_op = None
def _create_var(name: str, value_expr: TfExpression) -> TfExpression:
"""Internal helper for creating autosummary accumulators."""
assert not _finalized
name_id = name.replace("/", "_")
v = tf.cast(value_expr, _dtype)
if v.shape.is_fully_defined():
size = np.prod(tfutil.shape_to_list(v.shape))
size_expr = tf.constant(size, dtype=_dtype)
else:
size = None
size_expr = tf.reduce_prod(tf.cast(tf.shape(v), _dtype))
if size == 1:
if v.shape.ndims != 0:
v = tf.reshape(v, [])
v = [size_expr, v, tf.square(v)]
else:
v = [size_expr, tf.reduce_sum(v), tf.reduce_sum(tf.square(v))]
v = tf.cond(tf.is_finite(v[1]), lambda: tf.stack(v), lambda: tf.zeros(3, dtype=_dtype))
with tfutil.absolute_name_scope("Autosummary/" + name_id), tf.control_dependencies(None):
var = tf.Variable(tf.zeros(3, dtype=_dtype), trainable=False) # [sum(1), sum(x), sum(x**2)]
update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))
if name in _vars:
_vars[name].append(var)
else:
_vars[name] = [var]
return update_op
def autosummary(name: str, value: TfExpressionEx, passthru: TfExpressionEx = None) -> TfExpressionEx:
"""Create a new autosummary.
Args:
name: Name to use in TensorBoard
value: TensorFlow expression or python value to track
passthru: Optionally return this TF node without modifications but tack an autosummary update side-effect to this node.
Example use of the passthru mechanism:
n = autosummary('l2loss', loss, passthru=n)
This is a shorthand for the following code:
with tf.control_dependencies([autosummary('l2loss', loss)]):
n = tf.identity(n)
"""
tfutil.assert_tf_initialized()
name_id = name.replace("/", "_")
if tfutil.is_tf_expression(value):
with tf.name_scope("summary_" + name_id), tf.device(value.device):
update_op = _create_var(name, value)
with tf.control_dependencies([update_op]):
return tf.identity(value if passthru is None else passthru)
else: # python scalar or numpy array
if name not in _immediate:
with tfutil.absolute_name_scope("Autosummary/" + name_id), tf.device(None), tf.control_dependencies(None):
update_value = tf.placeholder(_dtype)
update_op = _create_var(name, update_value)
_immediate[name] = update_op, update_value
update_op, update_value = _immediate[name]
tfutil.run(update_op, {update_value: value})
return value if passthru is None else passthru
def finalize_autosummaries() -> None:
"""Create the necessary ops to include autosummaries in TensorBoard report.
Note: This should be done only once per graph.
"""
global _finalized
tfutil.assert_tf_initialized()
if _finalized:
return None
_finalized = True
tfutil.init_uninitialized_vars([var for vars_list in _vars.values() for var in vars_list])
# Create summary ops.
with tf.device(None), tf.control_dependencies(None):
for name, vars_list in _vars.items():
name_id = name.replace("/", "_")
with tfutil.absolute_name_scope("Autosummary/" + name_id):
moments = tf.add_n(vars_list)
moments /= moments[0]
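                # after normalisation, moments = [1, mean(x), mean(x**2)], so the
                # std below follows from Var[x] = E[x**2] - E[x]**2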
with tf.control_dependencies([moments]): # read before resetting
reset_ops = [tf.assign(var, tf.zeros(3, dtype=_dtype)) for var in vars_list]
with tf.name_scope(None), tf.control_dependencies(reset_ops): # reset before reporting
mean = moments[1]
std = tf.sqrt(moments[2] - tf.square(moments[1]))
tf.summary.scalar(name, mean)
tf.summary.scalar("xCustomScalars/" + name + "/margin_lo", mean - std)
tf.summary.scalar("xCustomScalars/" + name + "/margin_hi", mean + std)
# Group by category and chart name.
cat_dict = OrderedDict()
for series_name in sorted(_vars.keys()):
p = series_name.split("/")
cat = p[0] if len(p) >= 2 else ""
chart = "/".join(p[1:-1]) if len(p) >= 3 else p[-1]
if cat not in cat_dict:
cat_dict[cat] = OrderedDict()
if chart not in cat_dict[cat]:
cat_dict[cat][chart] = []
cat_dict[cat][chart].append(series_name)
# Setup custom_scalar layout.
categories = []
for cat_name, chart_dict in cat_dict.items():
charts = []
for chart_name, series_names in chart_dict.items():
series = []
for series_name in series_names:
series.append(layout_pb2.MarginChartContent.Series(
value=series_name,
lower="xCustomScalars/" + series_name + "/margin_lo",
upper="xCustomScalars/" + series_name + "/margin_hi"))
margin = layout_pb2.MarginChartContent(series=series)
charts.append(layout_pb2.Chart(title=chart_name, margin=margin))
categories.append(layout_pb2.Category(title=cat_name, chart=charts))
layout = summary_lib.custom_scalar_pb(layout_pb2.Layout(category=categories))
return layout
def save_summaries(file_writer, global_step=None):
"""Call FileWriter.add_summary() with all summaries in the default graph,
automatically finalizing and merging them on the first call.
"""
global _merge_op
tfutil.assert_tf_initialized()
if _merge_op is None:
layout = finalize_autosummaries()
if layout is not None:
file_writer.add_summary(layout)
with tf.device(None), tf.control_dependencies(None):
_merge_op = tf.summary.merge_all()
file_writer.add_summary(_merge_op.eval(), global_step)
|
microsoft/DiscoFaceGAN
|
dnnlib/tflib/autosummary.py
|
Python
|
mit
| 7,537
|
[
"VisIt"
] |
a44808bc9198d10b7e8be52fab2fa6d9c484e486d7862ae216fc84c80d502df3
|
"""Download and install structured genome data and aligner index files.
Downloads prepared FASTA, indexes for aligners like BWA, Bowtie and novoalign
and other genome data in automated pipelines. Specify the genomes and aligners
to use in an input biodata.yaml configuration file.
The main targets are fabric functions:
- install_data -- Install biological data from scratch, including indexing genomes.
- install_data_s3 -- Install biological data, downloading pre-computed indexes from S3.
- upload_s3 -- Upload created indexes to biodata S3 bucket.
"""
import os
import operator
import socket
import subprocess
from math import log
from fabric.api import *
from fabric.contrib.files import *
from fabric.context_managers import path
try:
import yaml
except ImportError:
yaml = None
try:
import boto
except ImportError:
boto = None
from cloudbio.biodata import galaxy, ggd
from cloudbio.biodata.dbsnp import download_dbsnp
from cloudbio.biodata.rnaseq import download_transcripts
from cloudbio.custom import shared
from cloudbio.fabutils import quiet
import multiprocessing as mp
# -- Configuration for genomes to download and prepare
class _DownloadHelper:
def __init__(self):
self.config = {}
def ucsc_name(self):
return None
def _exists(self, fname, seq_dir):
"""Check if a file exists in either download or final destination.
"""
return env.safe_exists(fname) or env.safe_exists(os.path.join(seq_dir, fname))
class UCSCGenome(_DownloadHelper):
def __init__(self, genome_name, dl_name=None):
_DownloadHelper.__init__(self)
self.data_source = "UCSC"
self._name = genome_name
self.dl_name = dl_name if dl_name is not None else genome_name
self._url = "ftp://hgdownload.cse.ucsc.edu/goldenPath/%s/bigZips" % \
genome_name
def ucsc_name(self):
return self._name
def _karyotype_sort(self, xs):
"""Sort reads in karyotypic order to work with GATK's defaults.
"""
def karyotype_keyfn(x):
base = os.path.splitext(os.path.basename(x))[0]
if base.startswith("chr"):
base = base[3:]
parts = base.split("_")
try:
parts[0] = int(parts[0])
except ValueError:
pass
# unplaced at the very end
if parts[0] == "Un":
parts.insert(0, "z")
# mitochondrial special case -- after X/Y
elif parts[0] in ["M", "MT"]:
parts.insert(0, "x")
# sort random and extra chromosomes after M
elif len(parts) > 1:
parts.insert(0, "y")
return parts
return sorted(xs, key=karyotype_keyfn)
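        # Resulting order (illustration): chr1..chr22, chrX, chrY, chrM, chr*_random, chrUn_*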
def _split_multifasta(self, fasta_file):
chrom = ""
file_handle = None
file_names = []
out_dir = os.path.dirname(fasta_file)
with open(fasta_file) as in_handle:
for line in in_handle:
if line.startswith(">"):
chrom = line.split(">")[1].strip()
file_handle.close() if file_handle else None
file_names.append(chrom + ".fa")
file_handle = open(os.path.join(out_dir, chrom + ".fa"), "w")
file_handle.write(line)
else:
file_handle.write(line)
file_handle.close()
return file_names
def download(self, seq_dir):
zipped_file = None
genome_file = "%s.fa" % self._name
if not self._exists(genome_file, seq_dir):
prep_dir = "seq_prep"
env.safe_run("mkdir -p %s" % prep_dir)
with cd(prep_dir):
zipped_file = self._download_zip(seq_dir)
if zipped_file.endswith(".tar.gz"):
env.safe_run("tar -xzpf %s" % zipped_file)
elif zipped_file.endswith(".zip"):
env.safe_run("unzip %s" % zipped_file)
elif zipped_file.endswith(".gz"):
if not env.safe_exists("out.fa"):
env.safe_run("gunzip -c %s > out.fa" % zipped_file)
else:
raise ValueError("Do not know how to handle: %s" % zipped_file)
tmp_file = genome_file.replace(".fa", ".txt")
result = env.safe_run_output("find `pwd` -name '*.fa'")
result = [x.strip() for x in result.split("\n")]
if len(result) == 1:
orig_result = result[0]
result = self._split_multifasta(result[0])
env.safe_run("rm %s" % orig_result)
result = self._karyotype_sort(result)
env.safe_run("rm -f inputs.txt")
for fname in result:
with quiet():
env.safe_run("echo '%s' >> inputs.txt" % fname)
env.safe_run("cat `cat inputs.txt` > %s" % (tmp_file))
for fname in result:
with quiet():
env.safe_run("rm -f %s" % fname)
env.safe_run("mv %s %s" % (tmp_file, genome_file))
zipped_file = os.path.join(prep_dir, zipped_file)
genome_file = os.path.join(prep_dir, genome_file)
return genome_file, [zipped_file]
def _download_zip(self, seq_dir):
for zipped_file in ["chromFa.tar.gz", "%s.fa.gz" % self._name,
"chromFa.zip"]:
if not self._exists(zipped_file, seq_dir):
result = shared._remote_fetch(env, "%s/%s" % (self._url, zipped_file), allow_fail=True)
if result:
break
else:
break
return zipped_file
class NCBIRest(_DownloadHelper):
"""Retrieve files using the TogoWS REST server pointed at NCBI.
"""
def __init__(self, name, refs, dl_name=None):
_DownloadHelper.__init__(self)
self.data_source = "NCBI"
self._name = name
self._refs = refs
self.dl_name = dl_name if dl_name is not None else name
self._base_url = "http://togows.dbcls.jp/entry/ncbi-nucleotide/%s.fasta"
def download(self, seq_dir):
genome_file = "%s.fa" % self._name
if not self._exists(genome_file, seq_dir):
for ref in self._refs:
shared._remote_fetch(env, self._base_url % ref)
env.safe_run("ls -l")
env.safe_sed('%s.fasta' % ref, '^>.*$', '>%s' % ref, '1')
tmp_file = genome_file.replace(".fa", ".txt")
env.safe_run("cat *.fasta > %s" % tmp_file)
env.safe_run("rm -f *.fasta")
env.safe_run("rm -f *.bak")
env.safe_run("mv %s %s" % (tmp_file, genome_file))
return genome_file, []
class VectorBase(_DownloadHelper):
"""Retrieve genomes from VectorBase) """
def __init__(self, name, genus, species, strain, release, assembly_types):
_DownloadHelper.__init__(self)
self._name = name
self.data_source = "VectorBase"
self._base_url = ("http://www.vectorbase.org/sites/default/files/ftp/"
"downloads/")
_base_file = ("{genus}-{species}-{strain}_{assembly}"
"_{release}.fa.gz")
self._to_get = []
for assembly in assembly_types:
self._to_get.append(_base_file.format(**locals()))
def download(self, seq_dir):
print os.getcwd()
genome_file = "%s.fa" % self._name
for fn in self._to_get:
url = self._base_url + fn
if not self._exists(fn, seq_dir):
shared._remote_fetch(env, url)
env.safe_run("gunzip -c %s >> %s" % (fn, genome_file))
return genome_file, []
class EnsemblGenome(_DownloadHelper):
"""Retrieve genome FASTA files from Ensembl.
ftp://ftp.ensemblgenomes.org/pub/plants/release-22/fasta/
arabidopsis_thaliana/dna/Arabidopsis_thaliana.TAIR10.22.dna.toplevel.fa.gz
ftp://ftp.ensembl.org/pub/release-75/fasta/
caenorhabditis_elegans/dna/Caenorhabditis_elegans.WBcel235.75.dna.toplevel.fa.gz
ftp://ftp.ensemblgenomes.org/pub/bacteria/release-23/bacteria/fasta/
bacteria_17_collection/pseudomonas_aeruginosa_ucbpp_pa14/dna/
Pseudomonas_aeruginosa_ucbpp_pa14.GCA_000014625.1.23.dna.toplevel.fa.gz
"""
def __init__(self, ensembl_section, release, organism, name, subsection=None):
_DownloadHelper.__init__(self)
self.data_source = "Ensembl"
if ensembl_section == "standard":
url = "ftp://ftp.ensembl.org/pub/"
else:
url = "ftp://ftp.ensemblgenomes.org/pub/%s/" % ensembl_section
url += "release-%s/fasta/" % release
if subsection:
url += "%s/" % subsection
url += "%s/dna/" % organism.lower()
self._url = url
if ensembl_section == "standard":
self._get_file = "%s.%s.dna.toplevel.fa.gz" % (organism, name)
else:
self._get_file = "%s.%s.%s.dna.toplevel.fa.gz" % (organism, name, release)
self._name = name
self.dl_name = name
def download(self, seq_dir):
genome_file = "%s.fa" % self._name
if not self._exists(self._get_file, seq_dir):
shared._remote_fetch(env, "%s%s" % (self._url, self._get_file))
if not self._exists(genome_file, seq_dir):
env.safe_run("gunzip -c %s > %s" % (self._get_file, genome_file))
return genome_file, [self._get_file]
class BroadGenome(_DownloadHelper):
"""Retrieve genomes organized and sorted by Broad for use with GATK.
Uses the UCSC-name compatible versions of the GATK bundles.
"""
def __init__(self, name, bundle_version, target_fasta, dl_name=None):
_DownloadHelper.__init__(self)
self.data_source = "UCSC"
self._name = name
self.dl_name = dl_name if dl_name is not None else name
self._target = target_fasta
self._ftp_url = "ftp://gsapubftp-anonymous:@ftp.broadinstitute.org/bundle/" + \
"{ver}/{org}/".format(ver=bundle_version, org=self.dl_name)
def download(self, seq_dir):
org_file = "%s.fa" % self._name
if not self._exists(org_file, seq_dir):
shared._remote_fetch(env, "%s%s.gz" % (self._ftp_url, self._target))
env.safe_run("gunzip %s.gz" % self._target)
env.safe_run("mv %s %s" % (self._target, org_file))
return org_file, []
class GGDGenome:
"""Genome with download specified via a GGD recipe.
"""
def __init__(self, name):
self._name = name
BROAD_BUNDLE_VERSION = "2.8"
DBSNP_VERSION = "138"
GENOMES_SUPPORTED = [
("phiX174", "phix", NCBIRest("phix", ["NC_001422.1"])),
("Scerevisiae", "sacCer3", UCSCGenome("sacCer3")),
("Mmusculus", "mm10", UCSCGenome("mm10")),
("Mmusculus", "mm9", UCSCGenome("mm9")),
("Mmusculus", "mm8", UCSCGenome("mm8")),
("Hsapiens", "hg18", BroadGenome("hg18", BROAD_BUNDLE_VERSION,
"Homo_sapiens_assembly18.fasta")),
("Hsapiens", "hg19", BroadGenome("hg19", BROAD_BUNDLE_VERSION,
"ucsc.hg19.fasta")),
("Hsapiens", "GRCh37", BroadGenome("GRCh37", BROAD_BUNDLE_VERSION,
"human_g1k_v37.fasta", "b37")),
("Hsapiens", "hg38", GGDGenome("hg38")),
("Hsapiens", "hg38-noalt", GGDGenome("hg38-noalt")),
("Rnorvegicus", "rn6", UCSCGenome("rn6")),
("Rnorvegicus", "rn5", UCSCGenome("rn5")),
("Rnorvegicus", "rn4", UCSCGenome("rn4")),
("Xtropicalis", "xenTro3", UCSCGenome("xenTro3")),
("Athaliana", "TAIR10", EnsemblGenome("plants", "26",
"Arabidopsis_thaliana", "TAIR10")),
("Dmelanogaster", "dm3", UCSCGenome("dm3")),
("Celegans", "WBcel235", EnsemblGenome("standard", "80",
"Caenorhabditis_elegans", "WBcel235")),
("Mtuberculosis_H37Rv", "mycoTube_H37RV", NCBIRest("mycoTube_H37RV",
["NC_000962"])),
("Msmegmatis", "92", NCBIRest("92", ["NC_008596.1"])),
("Paeruginosa_UCBPP-PA14", "pseudomonas_aeruginosa_ucbpp_pa14",
EnsemblGenome("bacteria", "26", "Pseudomonas_aeruginosa_ucbpp_pa14",
"GCA_000014625.1", "bacteria_17_collection")),
("Ecoli", "eschColi_K12", NCBIRest("eschColi_K12", ["U00096.2"])),
("Amellifera_Honeybee", "apiMel3", UCSCGenome("apiMel3")),
("Cfamiliaris_Dog", "canFam3", UCSCGenome("canFam3")),
("Cfamiliaris_Dog", "canFam2", UCSCGenome("canFam2")),
("Drerio_Zebrafish", "Zv9", EnsemblGenome("standard", "80", "Danio_rerio", "Zv9")),
("Drerio_Zebrafish", "GRCz10", EnsemblGenome("standard", "81", "Danio_rerio", "GRCz10")),
("Ecaballus_Horse", "equCab2", UCSCGenome("equCab2")),
("Fcatus_Cat", "felCat3", UCSCGenome("felCat3")),
("Ggallus_Chicken", "galGal4", UCSCGenome("galGal4")),
("Tguttata_Zebra_finch", "taeGut1", UCSCGenome("taeGut1")),
("Aalbimanus", "AalbS1", VectorBase("AalbS1", "Anopheles",
"albimanus", "STECLA",
"AalbS1", ["SCAFFOLDS"])),
("Agambiae", "AgamP3", VectorBase("AgamP3", "Anopheles",
"gambiae", "PEST",
"AgamP3", ["CHROMOSOMES"])),]
GENOME_INDEXES_SUPPORTED = ["bowtie", "bowtie2", "bwa", "maq", "novoalign", "novoalign-cs",
"ucsc", "mosaik", "snap", "star", "rtg"]
DEFAULT_GENOME_INDEXES = ["seq"]
# -- Fabric instructions
def _check_version():
version = env.version
if int(version.split(".")[0]) < 1:
raise NotImplementedError("Please install fabric version 1 or better")
def install_data(config_source, approaches=None):
"""Main entry point for installing useful biological data.
"""
PREP_FNS = {"s3": _download_s3_index,
"ggd": _install_with_ggd,
"raw": _prep_raw_index}
if approaches is None: approaches = ["raw"]
ready_approaches = []
for approach in approaches:
ready_approaches.append((approach, PREP_FNS[approach]))
_check_version()
# Append a potentially custom system install path to PATH so tools are found
with path(os.path.join(env.system_install, 'bin')):
genomes, genome_indexes, config = _get_genomes(config_source)
genome_indexes = [x for x in DEFAULT_GENOME_INDEXES if x not in genome_indexes] + genome_indexes
_make_genome_directories(env, genomes)
download_transcripts(genomes, env)
_prep_genomes(env, genomes, genome_indexes, ready_approaches)
_install_additional_data(genomes, genome_indexes, config)
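# Hedged invocation sketch (fabfile name is illustrative):
#
#   fab -f data_fabfile.py -H localhost install_data:biodata.yaml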
def install_data_s3(config_source):
"""Install data using pre-existing genomes present on Amazon s3.
"""
_check_version()
genomes, genome_indexes, config = _get_genomes(config_source)
genome_indexes += [x for x in DEFAULT_GENOME_INDEXES if x not in genome_indexes]
_make_genome_directories(env, genomes)
download_transcripts(genomes, env)
_download_genomes(genomes, genome_indexes)
_install_additional_data(genomes, genome_indexes, config)
def install_data_rsync(config_source):
"""Install data using pre-existing genomes from Galaxy rsync servers.
"""
_check_version()
genomes, genome_indexes, config = _get_genomes(config_source)
genome_indexes += [x for x in DEFAULT_GENOME_INDEXES if x not in genome_indexes]
# Galaxy stores FASTAs in ucsc format and generates on the fly
if "ucsc" not in genome_indexes:
genome_indexes.append("ucsc")
genome_dir = _make_genome_dir()
galaxy.rsync_genomes(genome_dir, genomes, genome_indexes)
def upload_s3(config_source):
"""Upload prepared genome files by identifier to Amazon s3 buckets.
"""
if boto is None:
raise ImportError("install boto to upload to Amazon s3")
if env.host != "localhost" and not env.host.startswith(socket.gethostname()):
raise ValueError("Need to run S3 upload on a local machine")
_check_version()
genomes, genome_indexes, config = _get_genomes(config_source)
genome_indexes += [x for x in DEFAULT_GENOME_INDEXES if x not in genome_indexes]
_data_ngs_genomes(genomes, genome_indexes)
_upload_genomes(genomes, genome_indexes)
def _install_additional_data(genomes, genome_indexes, config):
download_dbsnp(genomes, BROAD_BUNDLE_VERSION, DBSNP_VERSION)
for custom in (config.get("custom") or []):
_prep_custom_genome(custom, genomes, genome_indexes, env)
if config.get("install_liftover", False):
lift_over_genomes = [g.ucsc_name() for (_, _, g) in genomes if g.ucsc_name()]
_data_liftover(lift_over_genomes)
if config.get("install_uniref", False):
_data_uniref()
def _get_genomes(config_source):
if isinstance(config_source, dict):
config = config_source
else:
if yaml is None:
raise ImportError("install yaml to read configuration from %s" % config_source)
with open(config_source) as in_handle:
config = yaml.load(in_handle)
genomes = []
genomes_config = config["genomes"] or []
env.logger.info("List of genomes to get (from the config file at '{0}'): {1}"
.format(config_source, ', '.join(g.get('name', g["dbkey"]) for g in genomes_config)))
for g in genomes_config:
ginfo = None
for info in GENOMES_SUPPORTED:
if info[1] == g["dbkey"]:
ginfo = info
break
assert ginfo is not None, "Did not find download info for %s" % g["dbkey"]
name, gid, manager = ginfo
manager.config = g
genomes.append((name, gid, manager))
indexes = config["genome_indexes"] or []
if "seq" in indexes:
indexes.remove("seq")
indexes.insert(0, "seq")
return genomes, indexes, config
# ## Decorators and context managers
def _if_installed(pname):
"""Run if the given program name is installed.
"""
def argcatcher(func):
def decorator(*args, **kwargs):
if not shared._executable_not_on_path(pname):
return func(*args, **kwargs)
return decorator
return argcatcher
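# Example (as used below): decorating an indexer makes it a silent no-op when the
# underlying tool is missing, e.g.
#
#   @_if_installed("faToTwoBit")
#   def _index_twobit(ref_file):
#       ...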
# ## Generic preparation functions
def _make_genome_dir():
genome_dir = os.path.join(env.data_files, "genomes")
if not env.safe_exists(genome_dir):
with settings(warn_only=True):
result = env.safe_run_output("mkdir -p %s" % genome_dir)
else:
result = None
if result is not None and result.failed:
env.safe_sudo("mkdir -p %s" % genome_dir)
env.safe_sudo("chown -R %s %s" % (env.user, genome_dir))
return genome_dir
def _make_genome_directories(env, genomes):
genome_dir = _make_genome_dir()
for (orgname, gid, manager) in genomes:
org_dir = os.path.join(genome_dir, orgname, gid)
if not env.safe_exists(org_dir):
env.safe_run('mkdir -p %s' % org_dir)
def _prep_genomes(env, genomes, genome_indexes, retrieve_fns):
"""Prepare genomes with the given indexes, supporting multiple retrieval methods.
"""
genome_dir = _make_genome_dir()
for (orgname, gid, manager) in genomes:
org_dir = os.path.join(genome_dir, orgname, gid)
if not env.safe_exists(org_dir):
env.safe_run('mkdir -p %s' % org_dir)
for idx in genome_indexes + manager.config.get("annotations", []):
with cd(org_dir):
if not env.safe_exists(idx):
finished = False
for method, retrieve_fn in retrieve_fns:
try:
retrieve_fn(env, manager, gid, idx)
finished = True
break
except KeyboardInterrupt:
raise
except:
# Fail on incorrect GGD recipes
if idx in manager.config.get("annotations", []) and method == "ggd":
raise
else:
env.logger.info("Genome preparation method {0} failed, trying next".format(method))
if not finished:
raise IOError("Could not prepare index {0} for {1} by any method".format(idx, gid))
ref_file = os.path.join(org_dir, "seq", "%s.fa" % gid)
if not env.safe_exists(ref_file):
ref_file = os.path.join(org_dir, "seq", "%s.fa" % manager._name)
assert env.safe_exists(ref_file), ref_file
cur_indexes = manager.config.get("indexes", genome_indexes)
_index_to_galaxy(org_dir, ref_file, gid, cur_indexes, manager.config)
# ## Genomes index for next-gen sequencing tools
def _get_ref_seq(env, manager):
"""Check for or retrieve the reference sequence.
"""
seq_dir = os.path.join(env.cwd, "seq")
ref_file = os.path.join(seq_dir, "%s.fa" % manager._name)
if not env.safe_exists(ref_file):
ref_file, base_zips = manager.download(seq_dir)
ref_file = _move_seq_files(ref_file, base_zips, seq_dir)
return ref_file
def _prep_raw_index(env, manager, gid, idx):
"""Prepare genome from raw downloads and indexes.
"""
env.logger.info("Preparing genome {0} with index {1}".format(gid, idx))
ref_file = _get_ref_seq(env, manager)
get_index_fn(idx)(ref_file)
def _data_ngs_genomes(genomes, genome_indexes):
"""Download and create index files for next generation genomes.
"""
genome_dir = _make_genome_dir()
for organism, genome, manager in genomes:
cur_dir = os.path.join(genome_dir, organism, genome)
env.logger.info("Processing genome {0} and putting it to {1}"\
.format(organism, cur_dir))
if not env.safe_exists(cur_dir):
env.safe_run('mkdir -p %s' % cur_dir)
with cd(cur_dir):
if hasattr(env, "remove_old_genomes") and env.remove_old_genomes:
_clean_genome_directory()
seq_dir = 'seq'
ref_file, base_zips = manager.download(seq_dir)
ref_file = _move_seq_files(ref_file, base_zips, seq_dir)
cur_indexes = manager.config.get("indexes", genome_indexes)
_index_to_galaxy(cur_dir, ref_file, genome, cur_indexes, manager.config)
def _index_to_galaxy(work_dir, ref_file, gid, genome_indexes, config):
"""Index sequence files and update associated Galaxy loc files.
"""
indexes = {}
with cd(work_dir):
for idx in genome_indexes:
index_file = get_index_fn(idx)(ref_file)
if index_file:
indexes[idx] = os.path.join(work_dir, index_file)
galaxy.prep_locs(gid, indexes, config)
class CustomMaskManager:
"""Create a custom genome based on masking an existing genome.
"""
def __init__(self, custom, config):
assert custom.has_key("mask")
self._custom = custom
self.config = config
def download(self, seq_dir):
base_seq = os.path.join(os.pardir, self._custom["base"],
"seq", "{0}.fa".format(self._custom["base"]))
assert env.safe_exists(base_seq)
mask_file = os.path.basename(self._custom["mask"])
ready_mask = apply("{0}-complement{1}".format, os.path.splitext(mask_file))
out_fasta = "{0}.fa".format(self._custom["dbkey"])
if not env.safe_exists(os.path.join(seq_dir, out_fasta)):
if not env.safe_exists(mask_file):
shared._remote_fetch(env, self._custom["mask"])
if not env.safe_exists(ready_mask):
env.safe_run("bedtools complement -i {i} -g {g}.fai > {o}".format(
i=mask_file, g=base_seq, o=ready_mask))
if not env.safe_exists(out_fasta):
env.safe_run("bedtools maskfasta -fi {fi} -bed {bed} -fo {fo}".format(
fi=base_seq, bed=ready_mask, fo=out_fasta))
return out_fasta, [mask_file, ready_mask]
def _prep_custom_genome(custom, genomes, genome_indexes, env):
"""Prepare a custom genome derived from existing genome.
Allows creation of masked genomes for specific purposes.
"""
cur_org = None
cur_manager = None
for org, gid, manager in genomes:
if gid == custom["base"]:
cur_org = org
cur_manager = manager
break
assert cur_org is not None
_data_ngs_genomes([[cur_org, custom["dbkey"],
CustomMaskManager(custom, cur_manager.config)]],
genome_indexes)
def _clean_genome_directory():
"""Remove any existing sequence information in the current directory.
"""
for dirname in GENOME_INDEXES_SUPPORTED + DEFAULT_GENOME_INDEXES:
if env.safe_exists(dirname):
env.safe_run("rm -rf %s" % dirname)
def _move_seq_files(ref_file, base_zips, seq_dir):
if not env.safe_exists(seq_dir):
env.safe_run('mkdir %s' % seq_dir)
for move_file in [ref_file] + base_zips:
if env.safe_exists(move_file):
env.safe_run("mv %s %s" % (move_file, seq_dir))
path, fname = os.path.split(ref_file)
moved_ref = os.path.join(path, seq_dir, fname)
assert env.safe_exists(moved_ref), moved_ref
return moved_ref
# ## Indexing for specific aligners
def _index_w_command(dir_name, command, ref_file, pre=None, post=None, ext=None):
"""Low level function to do the indexing and paths with an index command.
"""
index_name = os.path.splitext(os.path.basename(ref_file))[0]
if ext is not None: index_name += ext
full_ref_path = os.path.join(os.pardir, ref_file)
if not env.safe_exists(dir_name):
env.safe_run("mkdir %s" % dir_name)
with cd(dir_name):
if pre:
full_ref_path = pre(full_ref_path)
env.safe_run(command.format(ref_file=full_ref_path, index_name=index_name))
if post:
post(full_ref_path)
return os.path.join(dir_name, index_name)
@_if_installed("faToTwoBit")
def _index_twobit(ref_file):
"""Index reference files using 2bit for random access.
"""
dir_name = "ucsc"
cmd = "faToTwoBit {ref_file} {index_name}"
return _index_w_command(dir_name, cmd, ref_file)
def _index_bowtie(ref_file):
dir_name = "bowtie"
cmd = "bowtie-build -f {ref_file} {index_name}"
return _index_w_command(dir_name, cmd, ref_file)
def _index_bowtie2(ref_file):
dir_name = "bowtie2"
cmd = "bowtie2-build {ref_file} {index_name}"
out_suffix = _index_w_command(dir_name, cmd, ref_file)
bowtie_link = os.path.normpath(os.path.join(os.path.dirname(ref_file), os.path.pardir,
out_suffix + ".fa"))
relative_ref_file = os.path.relpath(ref_file, os.path.dirname(bowtie_link))
if not env.safe_exists(bowtie_link):
env.safe_run("ln -sf %s %s" % (relative_ref_file, bowtie_link))
return out_suffix
def _index_bwa(ref_file):
dir_name = "bwa"
local_ref = os.path.split(ref_file)[-1]
if not env.safe_exists(dir_name):
env.safe_run("mkdir %s" % dir_name)
with cd(dir_name):
env.safe_run("ln -sf %s" % os.path.join(os.pardir, ref_file))
with settings(warn_only=True):
result = env.safe_run("bwa index -a bwtsw %s" % local_ref)
# work around a bug in bwa indexing for small files
if result.failed:
env.safe_run("bwa index %s" % local_ref)
env.safe_run("rm -f %s" % local_ref)
return os.path.join(dir_name, local_ref)
def _index_maq(ref_file):
dir_name = "maq"
cmd = "maq fasta2bfa {ref_file} {index_name}"
def link_local(ref_file):
local = os.path.basename(ref_file)
env.safe_run("ln -sf {0} {1}".format(ref_file, local))
return local
def rm_local(local_file):
env.safe_run("rm -f {0}".format(local_file))
return _index_w_command(dir_name, cmd, ref_file, pre=link_local, post=rm_local)
@_if_installed("novoindex")
def _index_novoalign(ref_file):
dir_name = "novoalign"
cmd = "novoindex {index_name} {ref_file}"
return _index_w_command(dir_name, cmd, ref_file)
@_if_installed("novoalignCS")
def _index_novoalign_cs(ref_file):
dir_name = "novoalign_cs"
cmd = "novoindex -c {index_name} {ref_file}"
return _index_w_command(dir_name, cmd, ref_file)
def _index_sam(ref_file):
(ref_dir, local_file) = os.path.split(ref_file)
with cd(ref_dir):
if not env.safe_exists("%s.fai" % local_file):
env.safe_run("samtools faidx %s" % local_file)
galaxy.index_picard(ref_file)
return ref_file
def _index_star(ref_file):
(ref_dir, local_file) = os.path.split(ref_file)
gtf_file = os.path.join(ref_dir, os.pardir, "rnaseq", "ref-transcripts.gtf")
if not os.path.exists(gtf_file):
print "%s not found, skipping creating the STAR index." % (gtf_file)
return None
GenomeLength = os.path.getsize(ref_file)
Nbases = int(round(min(14, log(GenomeLength, 2)/2 - 2), 0))
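    # file size is used as a rough proxy for genome length; Nbases caps STAR's
    # --genomeSAindexNbases so small genomes get a correspondingly small index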
dir_name = os.path.normpath(os.path.join(ref_dir, os.pardir, "star"))
cpu = mp.cpu_count()
cmd = ("STAR --genomeDir %s --genomeFastaFiles {ref_file} "
"--runThreadN %s "
"--runMode genomeGenerate --sjdbOverhang 99 --sjdbGTFfile %s --genomeSAindexNbases %s" % (dir_name, str(cpu), gtf_file, Nbases))
return _index_w_command(dir_name, cmd, ref_file)
def _index_snap(ref_file):
"""Snap indexing is computationally expensive. Ask for all cores and need 64Gb of memory.
"""
dir_name = "snap"
index_name = os.path.splitext(os.path.basename(ref_file))[0]
org_arg = "-hg19" if index_name in ["hg19", "GRCh37"] else ""
cmd = "snap index {ref_file} {dir_name} -bSpace {org_arg}"
if not env.safe_exists(os.path.join(dir_name, "GenomeIndex")):
env.safe_run(cmd.format(**locals()))
return dir_name
def _index_rtg(ref_file):
"""Perform indexing for use with Real Time Genomics tools.
https://github.com/RealTimeGenomics/rtg-tools
"""
dir_name = "rtg"
index_name = "%s.sdf" % os.path.splitext(os.path.basename(ref_file))[0]
if not env.safe_exists(os.path.join(dir_name, index_name, "done")):
cmd = "rtg format -o {dir_name}/{index_name} {ref_file}"
env.safe_run(cmd.format(**locals()))
return dir_name
@_if_installed("MosaikJump")
def _index_mosaik(ref_file):
hash_size = 15
dir_name = "mosaik"
cmd = "MosaikBuild -fr {ref_file} -oa {index_name}"
def create_jumpdb(ref_file):
jmp_base = os.path.splitext(os.path.basename(ref_file))[0]
dat_file = "{0}.dat".format(jmp_base)
if not env.safe_exists("{0}_keys.jmp".format(jmp_base)):
cmd = "export MOSAIK_TMP=`pwd` && MosaikJump -hs {hash_size} -ia {ref_file} -out {index_name}".format(
hash_size=hash_size, ref_file=dat_file, index_name=jmp_base)
env.safe_run(cmd)
return _index_w_command(dir_name, cmd, ref_file,
post=create_jumpdb, ext=".dat")
# -- Retrieve using GGD recipes
def _install_with_ggd(env, manager, gid, recipe):
assert env.hosts == ["localhost"], "GGD recipes only work for local runs"
recipe_dir = os.path.normpath(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, "ggd-recipes"))
recipe_file = os.path.join(recipe_dir, gid, "%s.yaml" % recipe)
if os.path.exists(recipe_file):
ggd.install_recipe(env.cwd, recipe_file)
else:
raise NotImplementedError("GGD recipe not available for %s %s" % (gid, recipe))
# -- Genome upload and download to Amazon s3 buckets
def _download_s3_index(env, manager, gid, idx):
env.logger.info("Downloading genome from s3: {0} {1}".format(gid, idx))
url = "https://s3.amazonaws.com/biodata/genomes/%s-%s.tar.xz" % (gid, idx)
out_file = shared._remote_fetch(env, url)
env.safe_run("xz -dc %s | tar -xvpf -" % out_file)
env.safe_run("rm -f %s" % out_file)
def _download_genomes(genomes, genome_indexes):
"""Download a group of genomes from Amazon s3 bucket.
"""
genome_dir = _make_genome_dir()
for (orgname, gid, manager) in genomes:
org_dir = os.path.join(genome_dir, orgname, gid)
if not env.safe_exists(org_dir):
env.safe_run('mkdir -p %s' % org_dir)
for idx in genome_indexes:
with cd(org_dir):
if not env.safe_exists(idx):
_download_s3_index(env, manager, gid, idx)
ref_file = os.path.join(org_dir, "seq", "%s.fa" % gid)
if not env.safe_exists(ref_file):
ref_file = os.path.join(org_dir, "seq", "%s.fa" % manager._name)
assert env.safe_exists(ref_file), ref_file
cur_indexes = manager.config.get("indexes", genome_indexes)
_index_to_galaxy(org_dir, ref_file, gid, cur_indexes, manager.config)
def _upload_genomes(genomes, genome_indexes):
"""Upload our configured genomes to Amazon s3 bucket.
"""
conn = boto.connect_s3()
bucket = conn.create_bucket("biodata")
genome_dir = os.path.join(env.data_files, "genomes")
for (orgname, gid, _) in genomes:
cur_dir = os.path.join(genome_dir, orgname, gid)
_clean_directory(cur_dir, gid)
for idx in genome_indexes:
idx_dir = os.path.join(cur_dir, idx)
tarball = _tar_directory(idx_dir, "%s-%s" % (gid, idx))
_upload_to_s3(tarball, bucket)
bucket.make_public()
def _upload_to_s3(tarball, bucket):
"""Upload the genome tarball to s3.
"""
upload_script = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
"utils", "s3_multipart_upload.py")
s3_key_name = os.path.join("genomes", os.path.basename(tarball))
if not bucket.get_key(s3_key_name):
gb_size = int(run("du -sm %s" % tarball).split()[0]) / 1000.0
print "Uploading %s %.1fGb" % (s3_key_name, gb_size)
cl = ["python", upload_script, tarball, bucket.name, s3_key_name, "--public"]
subprocess.check_call(cl)
def _tar_directory(dir, tar_name):
"""Create a tarball of the directory.
"""
base_dir, tar_dir = os.path.split(dir)
tarball = os.path.join(base_dir, "%s.tar.xz" % tar_name)
if not env.safe_exists(tarball):
with cd(base_dir):
env.safe_run("tar -cvpf - %s | xz -zc - > %s" % (tar_dir,
os.path.basename(tarball)))
return tarball
def _clean_directory(dir, gid):
"""Clean duplicate files from directories before tar and upload.
"""
# get rid of softlinks
bowtie_ln = os.path.join(dir, "bowtie", "%s.fa" % gid)
maq_ln = os.path.join(dir, "maq", "%s.fa" % gid)
for to_remove in [bowtie_ln, maq_ln]:
if env.safe_exists(to_remove):
env.safe_run("rm -f %s" % to_remove)
# remove any downloaded original sequence files
remove_exts = ["*.gz", "*.zip"]
with cd(os.path.join(dir, "seq")):
for rext in remove_exts:
fnames = env.safe_run("find . -name '%s'" % rext)
for fname in (f.strip() for f in fnames.split("\n") if f.strip()):
env.safe_run("rm -f %s" % fname)
# == Liftover files
def _data_liftover(lift_over_genomes):
"""Download chain files for running liftOver.
Does not install liftOver binaries automatically.
"""
lo_dir = os.path.join(env.data_files, "liftOver")
if not env.safe_exists(lo_dir):
env.safe_run("mkdir %s" % lo_dir)
lo_base_url = "ftp://hgdownload.cse.ucsc.edu/goldenPath/%s/liftOver/%s"
lo_base_file = "%sTo%s.over.chain.gz"
for g1 in lift_over_genomes:
for g2 in [g for g in lift_over_genomes if g != g1]:
g2u = g2[0].upper() + g2[1:]
cur_file = lo_base_file % (g1, g2u)
non_zip = os.path.splitext(cur_file)[0]
worked = False
with cd(lo_dir):
if not env.safe_exists(non_zip):
result = shared._remote_fetch(env, "%s" % (lo_base_url % (g1, cur_file)), allow_fail=True)
# liftOver chain files do not exist for every genome pair
# only move forward if we found the file
if result:
worked = True
env.safe_run("gunzip %s" % result)
if worked:
ref_parts = [g1, g2, os.path.join(lo_dir, non_zip)]
galaxy.update_loc_file("liftOver.loc", ref_parts)
# == UniRef
def _data_uniref():
"""Retrieve and index UniRef databases for protein searches.
http://www.ebi.ac.uk/uniref/
These are currently indexed for FASTA searches. Are other indexes desired?
Should this be separated out and organized by program like genome data?
This should also check the release note and automatically download and
replace older versions.
"""
site = "ftp://ftp.uniprot.org"
base_url = site + "/pub/databases/uniprot/" \
"current_release/uniref/%s/%s"
for uniref_db in ["uniref50", "uniref90", "uniref100"]:
work_dir = os.path.join(env.data_files, "uniref", uniref_db)
if not env.safe_exists(work_dir):
env.safe_run("mkdir -p %s" % work_dir)
base_work_url = base_url % (uniref_db, uniref_db)
fasta_url = base_work_url + ".fasta.gz"
base_file = os.path.splitext(os.path.basename(fasta_url))[0]
with cd(work_dir):
if not env.safe_exists(base_file):
out_file = shared._remote_fetch(env, fasta_url)
env.safe_run("gunzip %s" % out_file)
shared._remote_fetch(env, base_work_url + ".release_note")
_index_blast_db(work_dir, base_file, "prot")
def _index_blast_db(work_dir, base_file, db_type):
"""Index a database using blast+ for similary searching.
"""
type_to_ext = dict(prot = ("phr", "pal"), nucl = ("nhr", "nal"))
db_name = os.path.splitext(base_file)[0]
with cd(work_dir):
if not reduce(operator.or_,
(env.safe_exists("%s.%s" % (db_name, ext)) for ext in type_to_ext[db_type])):
env.safe_run("makeblastdb -in %s -dbtype %s -out %s" %
(base_file, db_type, db_name))
def get_index_fn(index):
"""
Return the index function for an index; if it is missing, return a function
that is a no-op.
"""
return INDEX_FNS.get(index, lambda x: None)
INDEX_FNS = {
"seq" : _index_sam,
"bwa" : _index_bwa,
"bowtie": _index_bowtie,
"bowtie2": _index_bowtie2,
"maq": _index_maq,
"mosaik": _index_mosaik,
"novoalign": _index_novoalign,
"novoalign_cs": _index_novoalign_cs,
"ucsc": _index_twobit,
"star": _index_star,
"snap": _index_snap,
"rtg": _index_rtg,
}
|
heuermh/cloudbiolinux
|
cloudbio/biodata/genomes.py
|
Python
|
mit
| 39,516
|
[
"BLAST",
"BWA",
"Bowtie",
"Galaxy"
] |
252ca0dd5be0e03347680cb48971b81d6d685c98183813a283cf67998dd42af7
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A pedestrian version of The Cannon.
"""
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = ["CannonModel"]
import logging
from numpy.linalg import inv
import numpy as np
import scipy.optimize as op
import os
from scipy.ndimage import gaussian_filter
# by Jason
# try to use gnumpy
import math
import pyopencl as cl
from pyopencl.tools import get_test_platforms_and_devices
def log10(x):
return math.log10(x)
# sinc interpolation
def sinc_interp(x, s, u):
"""
Interpolates x, sampled at "s" instants
Output y is sampled at "u" instants ("u" for "upsampled")
from Matlab:
http://phaseportrait.blogspot.com/2008/06/sinc-interpolation-in-matlab.html
"""
if len(x) != len(s):
print("len(x) should be equal to len(s")
# Find the period
T = s[1] - s[0]
sincM = np.tile(u, (len(s), 1)) - np.tile(s[:, np.newaxis], (1, len(u)))
y = np.dot(x, np.sinc(sincM / T))
return y
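# Hedged example: upsample a coarsely sampled sine wave
#
#   s = np.linspace(0, 1, 20)                    # original sample instants
#   u = np.linspace(0, 1, 200)                   # upsampled instants
#   y = sinc_interp(np.sin(2 * np.pi * 3 * s), s, u)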
from . import (model, utils)
logger = logging.getLogger(__name__)
class CannonModel(model.BaseCannonModel):
"""
A generalised Cannon model for the estimation of arbitrary stellar labels.
:param labelled_set:
A set of labelled objects. The most common input form is a table with
columns as labels, and stars/objects as rows.
:type labelled_set:
:class:`~astropy.table.Table` or a numpy structured array
:param normalized_flux:
An array of normalized fluxes for stars in the labelled set, given as
shape `(num_stars, num_pixels)`. The `num_stars` should match the number
of rows in `labelled_set`.
:type normalized_flux:
:class:`np.ndarray`
:param normalized_ivar:
An array of inverse variances on the normalized fluxes for stars in the
labelled set. The shape of the `normalized_ivar` array should match that
of `normalized_flux`.
:type normalized_ivar:
:class:`np.ndarray`
:param dispersion: [optional]
The dispersion values corresponding to the given pixels. If provided,
this should have length `num_pixels`.
:type dispersion:
:class:`np.array`
:param threads: [optional]
Specify the number of parallel threads to use. If `threads > 1`, the
training and prediction phases will be automagically parallelised.
:type threads:
int
:param pool: [optional]
Specify an optional multiprocessing pool to map jobs onto.
This argument is only used if specified and if `threads > 1`.
:type pool:
        None, or an object providing a `map` method (e.g. :class:`multiprocessing.pool.Pool`)
"""
def __init__(self, *args, **kwargs):
super(CannonModel, self).__init__(*args, **kwargs)
@model.requires_model_description
def train(self, fixed_scatter=True, **kwargs):
"""
Train the model based on the labelled set.
"""
        # Experimental/aesthetic keywords:
# use_neighbouring_pixels, progressbar
assert fixed_scatter, "Are you refactoring?"
if self.s2 is None:
logger.warn("Fixing and assuming s2 = 0")
self.s2 = 0
if fixed_scatter and self.s2 is None:
raise ValueError("intrinsic pixel variance (s2) must be set "
"before training if fixed_scatter is set to True")
# We default use_neighbouring_pixels to None so that we can default it
# to True later if we want, but we can warn the user if they explicitly
# set it to True and we intend to ignore it.
use_neighbouring_pixels = kwargs.pop("use_neighbouring_pixels", None)
if self.theta is None:
if use_neighbouring_pixels is None:
use_neighbouring_pixels = True
initial_theta = [None] * self.dispersion.size
else:
# Since theta is already set, we will ignore neighbouring pixels.
if use_neighbouring_pixels is True:
use_neighbouring_pixels = False
logger.warn("Ignoring neighbouring pixels because theta is "
"already provided.")
initial_theta = self.theta.copy()
# Initialize the scatter.
initial_s2 = self.s2 if fixed_scatter \
else 0.01**2 * np.ones_like(self.dispersion)
# Prepare the method and arguments.
fitting_function = kwargs.pop("function", _fit_pixel)
kwds = {
"fixed_scatter": fixed_scatter,
"design_matrix": self.design_matrix,
"op_kwargs": kwargs.pop("op_kwargs", {}),
"op_bfgs_kwargs": kwargs.pop("op_bfgs_kwargs", {})
}
N_stars, N_pixels = self.normalized_flux.shape
logger.info("Training {0}-label {1} with {2} stars and {3} pixels/star"\
.format(len(self.vectorizer.label_names), type(self).__name__,
N_stars, N_pixels))
# Arguments:
# initial_theta, initial_s2, flux, ivar, design_matrix_mask,
# [additional_args],
# design_matrix, **kwargs
args = [initial_theta, initial_s2, self.normalized_flux.T,
self.normalized_ivar.T, self.censored_vectorizer_terms]
args.extend(kwargs.get("additional_args", []))
# Write the design matrix to a temporary file.
temporary_filenames = []
"""
Not clear whether this is still needed because this issue was
complicated by some Legacy Python issues. But I'm not ready to remove
this comment because I have forgotten about this issue twice before in
the past and it ruined my day.
temporary_filename = utils._pack_value(self.design_matrix)
kwds["design_matrix"] = temporary_filename
temporary_filenames.append(temporary_filename)
"""
N_items = N_pixels if kwargs.get("progressbar", True) else 0
# Wrap the function so we can parallelize it out.
mapper = map if self.pool is None else self.pool.map
try:
f = utils.wrapper(fitting_function, None, kwds, N_items)
if self.pool is None and use_neighbouring_pixels:
output = []
last_theta = []
for j, row in enumerate(zip(*args)):
if j > 0:
# Update with determined theta from neighbour pixel.
row = list(row)
row[0] = last_theta
row = tuple(row)
output.append(f(row))
last_theta = output[-1][0][:self.design_matrix.shape[1]]
else:
output = mapper(f, [row for row in zip(*args)])
except KeyboardInterrupt:
logger.warn("Keyboard interrupted training step!")
# Clean up any temporary files in case we are debugging.
for filename in temporary_filenames:
if os.path.exists(filename): os.remove(filename)
# re-raise a suppressed exception?
raise
# Clean up any temporary files.
for filename in temporary_filenames:
if os.path.exists(filename): os.remove(filename)
# Unpack the results.
results, metadata = zip(*output)
results = np.array(results)
self.theta, self.s2 = (results[:, :-1], results[:, -1])
return None
@model.requires_training_wheels
def predict(self, labels, **kwargs):
"""
Predict spectra from the trained model, given the labels.
:param labels:
The label values to predict model spectra of. The length and order
should match what is required of the vectorizer
(`CannonModel.vectorizer.label_names`).
"""
return np.dot(self.theta, self.vectorizer(labels).T).T
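    # Note: when given a 2-D array of label sets, predict() returns model fluxes
    # of shape (num_label_sets, num_pixels), e.g. (illustrative label values)
    # model.predict([[5777.0, 4.4, 0.0]]).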
@model.requires_training_wheels
# By Jason Cao
def fitting_spectrum_parameters(self,normalized_flux,normalized_ivar,inf_flux):
nor = normalized_flux
inf = inf_flux
ivar = normalized_ivar
n_pixel = nor[0, :].size
n_star = inf[:, 0].size
one = np.ones(n_star)
# new method for building matrix
x_data = np.c_[one,inf]
x_data = x_data[:,0:n_pixel]
y_data =inf
z_data = np.c_[inf,one]
z_data = z_data[:,1:n_pixel+1]
# fit
# It's not good. let's do it one star each time.
left = np.zeros((3,3))
right = np.zeros(3)
for p in range(0, n_star):
x_data_p = x_data[p, :]
y_data_p = y_data[p, :]
z_data_p = z_data[p, :]
nor_p = nor[p, :]
ivar_p = ivar[p, :]
# construct
ivar_r = ivar_p.ravel()
ni = len(ivar_r)
print("calculating parameters",p,"{:.2f}%".format(p/n_star*100))
c = np.zeros((ni, ni))
for i in range(0, ni):
c[i, i] = ivar_r[i]
y = nor_p.ravel()
a = np.c_[np.c_[x_data_p.ravel(), y_data_p.ravel()], z_data_p.ravel()]
left += np.dot(np.dot(a.T, c), a)
right += np.dot(np.dot(a.T,c), y)
parameters = np.dot(inv(left), right)
opt_flux = parameters[0]*x_data+parameters[1]*y_data+parameters[2]*z_data
print("finish fitting")
        # build theta: pixel-shifted copies of the trained coefficients, padded
        # with a zero row at the boundary (the same shift pattern as the
        # x_data/y_data/z_data construction above).
        zero = np.zeros(len(self.theta[0, :]))
        row = len(self.theta[:, 0])
        theta_x = np.vstack((zero, self.theta))
        theta_x = theta_x[0:row, :]
        theta_y = self.theta
        theta_z = np.vstack((self.theta, zero))
        theta_z = theta_z[1:row + 1, :]
        theta_opt = parameters[0]*theta_x + parameters[1]*theta_y + parameters[2]*theta_z
return opt_flux,theta_opt,parameters
    # Return the parameters of each star.
    # The uncertainty of the parameters is also calculated: one 3 x 3 covariance
    # block per star (ordered aa, ab, ac, ba, ...), stacked into a 3 x 3 x N array
    # stored in self.un_cov, while self.uncertainty stores the derived RV errors.
# Now the model
def fitting_spectrum_parameters_single(self,normalized_flux,normalized_ivar,inf_flux):
#at least 2d:
normalized_flux = np.atleast_2d(normalized_flux)
normalized_ivar = np.atleast_2d(normalized_ivar)
inf_flux = np.atleast_2d(inf_flux)
# do it
nor = normalized_flux
inf = inf_flux
ivar = normalized_ivar
n_pixel = nor[0, :].size
n_star = inf[:, 0].size
one = np.ones(n_star)
# new method for building matrix
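        # x_data/y_data/z_data are the inferred spectrum shifted by one pixel to
        # either side (padded with 1 at the boundary), so the fitted weights
        # (a, b, c) amount to a three-point, sub-pixel shift model of the
        # observed spectrum.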
z_data = np.c_[one,inf]
z_data = z_data[:,0:n_pixel]
y_data =inf
x_data = np.c_[inf,one]
x_data = x_data[:,1:n_pixel+1]
self.x_data =x_data
self.y_data =y_data
self.z_data =z_data
# fit
# It's not good. let's do it one star each time.
left = np.zeros((3,3))
right = np.zeros(3)
un = np.zeros((3,3))
parameters=np.array([0,1,0])
opt_flux = np.ones(n_pixel)
for p in range(0, n_star):
x_data_p = x_data[p, :]
y_data_p = y_data[p, :]
z_data_p = z_data[p, :]
nor_p = nor[p, :]
ivar_p = ivar[p, :]
# construct
ivar_r = ivar_p.ravel()
ni = len(ivar_r)
print("calculating parameters",p,"{:.2f}%".format(p/n_star*100))
c = np.zeros((ni, ni))
for i in range(0, ni):
c[i, i] = ivar_r[i]
y = nor_p.ravel()
a = np.c_[np.c_[x_data_p.ravel(), y_data_p.ravel()], z_data_p.ravel()]
left = np.dot(np.dot(a.T, c), a)
right = np.dot(np.dot(a.T,c), y)
un_p = inv(left)
parameters_p =np.dot(inv(left), right)
opt_flux = np.vstack((opt_flux,parameters_p[0]*x_data_p+parameters_p[1]*y_data_p+parameters_p[2]*z_data_p))
parameters = np.vstack((parameters,parameters_p))
print(parameters_p)
un = np.dstack((un,un_p))
print("finish fitting")
# reshape
parameters = parameters[1:(n_star+1),:]
opt_flux = opt_flux[1:(n_star + 1), :]
# new method for calculating velocity uncertainty:
# use gamma!
gamma = un[:,:,1:(n_star + 1)]
# calculate for each star
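        # First-order error propagation: J below is the gradient of
        # f(a, b, c) = (c - a) / (a + b + c) (evidently the sub-pixel shift
        # implied by the fitted weights) and gamma is the parameter covariance,
        # so the quoted RV uncertainty is 4144.68 * sqrt(J . gamma . J^T), where
        # the constant 4144.68 is the adopted velocity-per-pixel scale.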
N_star = len(parameters[:,0])
RV_un = []
for i in range(0,N_star):
a = parameters[i,0]
b = parameters[i,1]
c = parameters[i,2]
J = np.array([-(2.*c+b)/(a+b+c)**2., (a-c)/(a+b+c)**2. , (2.*a+b)/(a+b+c)**2.])
gamma_i = gamma[:,:,i]
RV_un.append(4144.68*(np.dot(np.dot(J,gamma_i),J.T))**0.5)
RV_un = np.array(RV_un)
# use gamma!
self.un_cov = gamma
self.uncertainty = RV_un
self.opt_flux = opt_flux
# the shape of the uncertainty is 3*3*N
print(parameters.shape,n_star,opt_flux.shape,RV_un.shape)
return opt_flux,parameters
def fitting_abc_labels_meantime_single(self,flux,ivar):
# input many stars
theta = self.theta
# Set the boundary to be 0
one = 0 * np.ones(len(theta[0, :]))
row = len(theta[:, 0])
# x
theta_x = np.vstack((theta, one))
theta_x = theta_x[1:row + 1, :]
# y
theta_y = theta
# z
theta_z = np.vstack((one, theta))
        theta_z = theta_z[0:row, :]
# This is a three-label version:
vectorizer = self.vectorizer
s2 = 0
adjusted_ivar = ivar / (1. + ivar * s2)
adjusted_sigma = np.sqrt(1.0 / adjusted_ivar)
adjusted_sigma = np.array(adjusted_sigma)
# Exclude non-finite points (e.g., points with zero inverse variance
# or non-finite flux values, but the latter shouldn't exist anyway).
use = np.isfinite(adjusted_sigma * flux)
N_labels = vectorizer.scales.size
# build a function
# The first 3 parameters are labels. The last three are abc
def f(xdata,*parameters):
y = parameters[N_labels]*np.dot(theta_x, vectorizer(parameters[:N_labels]).T) + parameters[N_labels+1]*np.dot(theta_y, vectorizer(parameters[:N_labels]).T) + parameters[N_labels+2]*np.dot(theta_z, vectorizer(parameters[:N_labels]).T)
y = np.array(y)
print(y.shape)
return y[use]
# key words
ir = [None,5000,1,-1,0,1,0]
print(f(*ir))
kwds = {
"f": f,
"xdata": None,
"ydata": flux,
"sigma": adjusted_sigma,
"absolute_sigma": True,
# These get passed through to leastsq:
#"Dfun": Dfun,
"col_deriv": True,
"ftol": 7. / 3 - 4. / 3 - 1, # Machine precision.
"xtol": 7. / 3 - 4. / 3 - 1, # Machine precision.
"gtol": 0.0,
"maxfev": 100000, # MAGIC
"epsfcn": None,
"factor": 0.1, # Smallest step size available for gradient approximation
"diag": 1.0 / vectorizer.scales
}
# scipy opt
# initial value
kwds["p0"] = np.array([4678.85000,1.98000000,-1.38500000,0.1,0.8,0.1])
op_labels, cov = op.curve_fit(**kwds)
print(op_labels)
return op_labels
# Return delta_chi_squared, which should be bigger than 0
    def delta_chi_squared(self, normalized_flux, normalized_ivar, inf_flux):
opt_flux = self.opt_flux
N_star = len(inf_flux[:,0])
delta_chi = []
chi_old = []
chi_new = []
for p in range(0, N_star):
ivar_r = normalized_ivar[p, :]
ni = len(ivar_r)
c = np.zeros((ni, ni))
print("Calculating delta-chi-squared",p,"{:.2f}%".format(p/N_star*100))
for i in range(0, ni):
c[i, i] = ivar_r[i]
# correct chi-squared
            a_old = np.dot(np.dot(normalized_flux[p, :] - inf_flux[p, :], c), (normalized_flux[p, :] - inf_flux[p, :]).T)
            a_opt = np.dot(np.dot(normalized_flux[p, :] - opt_flux[p, :], c), (normalized_flux[p, :] - opt_flux[p, :]).T)
delta_p = a_old-a_opt
chi_old.append(a_old)
chi_new.append(a_opt)
delta_chi.append(delta_p)
delta_chi = np.array(delta_chi)
chi_new = np.array(chi_new)
chi_old = np.array(chi_old)
self.chi_squared = chi_new
self.chi_squared_old = chi_old
return delta_chi
def fit(self, normalized_flux, normalized_ivar, initial_labels=None,
model_lsf=False, model_redshift=False, full_output=False, **kwargs):
"""
Solve the labels for the given normalized fluxes and inverse variances.
:param normalized_flux:
A `(N_star, N_pixels)` shape of normalized fluxes that are on the
same dispersion scale as the trained data.
:param normalized_ivar:
The inverse variances of the normalized flux values. This should
have the same shape as `normalized_flux`.
:param initial_labels: [optional]
The initial points to optimize from. If not given, only one
initialization will be made from the fiducial label point.
:param model_lsf: [optional]
Optionally convolve the spectral model with a Gaussian broadening
kernel of unknown width when fitting the data.
:param model_redshift: [optional]
Optionally redshift the spectral model when fitting the data.
:returns:
The labels. If `full_output` is set to True, then a three-length
tuple of `(labels, covariance_matrix, metadata)` will be returned.
"""
normalized_flux = np.atleast_2d(normalized_flux)
normalized_ivar = np.atleast_2d(normalized_ivar)
N_spectra = normalized_flux.shape[0]
if initial_labels is None:
initial_labels = self.vectorizer.fiducials
initial_labels = np.atleast_2d(initial_labels)
# Prepare the wrapper function and data.
message = None if not kwargs.pop("progressbar", True) \
else "Fitting {0} spectra".format(N_spectra)
f = utils.wrapper(_fit_spectrum,
(self.dispersion, initial_labels, self.vectorizer, self.theta,
self.s2, model_lsf, model_redshift),
kwargs, N_spectra, message=message)
args = (normalized_flux, normalized_ivar)
mapper = map if self.pool is None else self.pool.map
labels, cov, metadata = zip(*mapper(f, zip(*args)))
labels, cov = (np.array(labels), np.array(cov))
return (labels, cov, metadata) if full_output else labels
##### By Jason
def fit_opt(self, normalized_flux, normalized_ivar, initial_labels,
model_lsf=False, model_redshift=False, full_output=False, **kwargs):
"""
Solve the labels for the given normalized fluxes and inverse variances.
:param normalized_flux:
A `(N_star, N_pixels)` shape of normalized fluxes that are on the
same dispersion scale as the trained data.
:param normalized_ivar:
The inverse variances of the normalized flux values. This should
have the same shape as `normalized_flux`.
        :param initial_labels:
            The initial point(s) to optimize from. Unlike `fit`, this argument
            is required here.
:param model_lsf: [optional]
Optionally convolve the spectral model with a Gaussian broadening
kernel of unknown width when fitting the data.
:param model_redshift: [optional]
Optionally redshift the spectral model when fitting the data.
:returns:
The labels. If `full_output` is set to True, then a three-length
tuple of `(labels, covariance_matrix, metadata)` will be returned.
"""
normalized_flux = np.atleast_2d(normalized_flux)
normalized_ivar = np.atleast_2d(normalized_ivar)
N_spectra = normalized_flux.shape[0]
"""
if initial_labels is None:
initial_labels = self.vectorizer.fiducials
"""
initial_labels = np.atleast_2d(initial_labels)
"""
if initial_labels is None:
initial_labels = self.vectorizer.fiducials
else:
initial_labels = np.atleast_2d(initial_labels)
"""
# Prepare the wrapper function and data.
message = None if not kwargs.pop("progressbar", True) \
else "Fitting {0} spectra".format(N_spectra)
result = _fit_spectrum_opt2(normalized_flux,normalized_ivar,self.dispersion, initial_labels, self.vectorizer, self.theta,
self.s2, model_lsf, model_redshift)
return result
@model.requires_training_wheels
def _set_s2_by_hogg_heuristic(self):
"""
Set the pixel scatter by Hogg's heuristic.
See https://github.com/andycasey/AnniesLasso_2/issues/31 for more details.
"""
model_flux = self.predict(self.labels_array)
residuals_squared = (model_flux - self.normalized_flux)**2
def objective_function(s, residuals_squared, ivar):
adjusted_ivar = ivar/(1. + ivar * s**2)
chi_sq = residuals_squared * adjusted_ivar
return (np.mean(chi_sq) - 1.0)**2
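        # Minimising this objective picks, for each pixel, the scatter s that
        # makes the mean chi-squared over the labelled stars equal to ~1.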
s = []
for j in range(self.dispersion.size):
s.append(op.fmin(objective_function, 0,
args=(residuals_squared[:, j], self.normalized_ivar[:, j]),
disp=False))
self.s2 = np.array(s)**2
return True
def _estimate_label_vector(theta, s2, normalized_flux, normalized_ivar,
**kwargs):
"""
Perform a matrix inversion to estimate the values of the label vector given
some normalized fluxes and associated inverse variances.
:param theta:
The theta coefficients obtained from the training phase.
:param s2:
The intrinsic pixel variance.
:param normalized_flux:
The normalized flux values. These should be on the same dispersion scale
as the labelled data set.
:param normalized_ivar:
The inverse variance of the normalized flux values. This should have the
same shape as `normalized_flux`.
"""
inv_var = normalized_ivar/(1. + normalized_ivar * s2)
A = np.dot(theta.T, inv_var[:, None] * theta)
B = np.dot(theta.T, inv_var * normalized_flux)
return np.linalg.solve(A, B)
def _fit_spectrum(normalized_flux, normalized_ivar, dispersion, initial_labels,
vectorizer, theta, s2, model_lsf=False, model_redshift=False, **kwargs):
"""
Fit a single spectrum by least-squared fitting.
:param normalized_flux:
The normalized flux values.
:param normalized_ivar:
The inverse variance array for the normalized fluxes.
:param dispersion:
The dispersion (e.g., wavelength) points for the normalized fluxes.
:param initial_labels:
The point(s) to initialize optimization from.
:param vectorizer:
The vectorizer to use when fitting the data.
:param theta:
The theta coefficients (spectral derivatives) of the trained model.
:param s2:
The pixel scatter (s^2) array for each pixel.
:param model_lsf: [optional]
Convolve the spectral model with a Gaussian kernel at fitting time.
:param model_redshift: [optional]
Allow for a residual redshift in the spectral model at fitting time.
"""
adjusted_ivar = normalized_ivar/(1. + normalized_ivar * s2)
adjusted_sigma = np.sqrt(1.0/adjusted_ivar)
# Exclude non-finite points (e.g., points with zero inverse variance
# or non-finite flux values, but the latter shouldn't exist anyway).
use = np.isfinite(adjusted_sigma * normalized_flux)
N_labels = vectorizer.scales.size
if not np.any(use):
logger.warn("No information in spectrum!")
return (np.nan * np.ones(N_labels), None, {
"fail_message": "Pixels contained no information"})
normalized_flux = normalized_flux[use]
adjusted_sigma = adjusted_sigma[use]
max_abs_velocity = abs(kwargs.get("max_abs_velocity", 10))
    # Check whether the vectorizer has a derivative built in.
if kwargs.get("Dfun", False):
try:
vectorizer.get_label_vector_derivative(vectorizer.fiducials)
except NotImplementedError:
Dfun = None
logger.debug("No label vector derivative available!")
except:
logger.exception("Exception raised when trying to calculate the "
"label vector derivative at the fiducial values:")
raise
else:
# Use the label vector derivative.
"""
# Presumably because of the way leastsq works, the adjusted_inv_sigma
# does not enter here, otherwise we get incorrect results.
Dfun = lambda xdata, l: \
np.dot(theta, vectorizer.get_label_vector_derivative(*l)).T[use]
"""
raise NotImplementedError("requires a thinko")
def Dfun(labels, xdata, ydata, f, adjusted_inv_sigma):
return np.dot(theta,
vectorizer.get_label_vector_derivative(labels)).T[:, use]
else:
Dfun = None
mean_pixel_scale = 1.0/np.diff(dispersion).mean() # px/Angstrom
def f(xdata, *parameters):
y = np.dot(theta, vectorizer(parameters[:N_labels]).T)[:, 0]
# Convolve?
if model_lsf:
# This will always be the last parameter.
y = gaussian_filter(y, abs(parameters[-1]) * mean_pixel_scale)
# Redshift?
if model_redshift:
index = -2 if model_lsf else -1
v = parameters[index]
if np.abs(v) >= max_abs_velocity:
logger.debug("Returning NaNs because outside of max velocity")
return np.nan * np.ones(sum(use))
y = np.interp(dispersion,
dispersion * (1 + v/299792.458), y,
left=np.nan, right=np.nan)
return y[use]
kwds = {
"f": f,
"xdata": None,
"ydata": normalized_flux,
"sigma": adjusted_sigma,
"absolute_sigma": True,
# These get passed through to leastsq:
"Dfun": Dfun,
"col_deriv": True,
"ftol": 7./3 - 4./3 - 1, # Machine precision.
"xtol": 7./3 - 4./3 - 1, # Machine precision.
"gtol": 0.0,
"maxfev": 100000, # MAGIC
"epsfcn": None,
"factor": 0.1, # Smallest step size available for gradient approximation
"diag": 1.0/vectorizer.scales
}
# Only update the keywords with things that op.curve_fit/op.leastsq expects.
for key in set(kwargs).intersection(kwds):
if key == "Dfun": continue
kwds[key] = kwargs[key]
results = []
for p0 in np.atleast_2d(initial_labels):
kwds["p0"] = list(p0)
if model_redshift:
kwds["p0"] += [0]
if model_lsf:
kwds["p0"] += [5] # MAGIC
try:
op_labels, cov = op.curve_fit(**kwds)
except RuntimeError:
logger.exception("Exception in fitting from {}".format(p0))
continue
fvec = f(None, *op_labels)
meta = {
"p0": kwds["p0"],
"fvec": fvec,
"chi_sq": np.sum((fvec - normalized_flux)**2 / adjusted_sigma**2),
}
results.append((op_labels, cov, meta))
if len(results) == 0:
logger.warn("No results found!")
return (np.nan * np.ones(N_labels), None, {"fail_message": "No results found"})
best_result_index = np.nanargmin([m["chi_sq"] for (o, c, m) in results])
op_labels, cov, meta = results[best_result_index]
if np.allclose(op_labels, meta["p0"]):
logger.warn("Discarding optimized result because it is the same as the "
"initial value!")
# We are in dire straits. We should not trust the result.
op_labels *= np.nan
if not np.any(np.isfinite(cov)):
logger.warn("Non-finite covariance matrix returned!")
# Defaults for LSF/redshift parameters
meta.update(kernel=0, redshift=0)
for key, effect in zip(("kernel", "redshift"), (model_lsf, model_redshift)):
if effect:
meta[key] = op_labels[-1]
op_labels = op_labels[:-1]
# Save additional information.
meta.update({
"kernel": abs(meta["kernel"]),
"label_names": vectorizer.label_names,
"best_result_index": best_result_index,
"method": "curve_fit",
"derivatives_used": Dfun is not None,
"snr": np.nanmedian(normalized_flux * np.sqrt(normalized_ivar[use])),
"r_chi_sq": meta["chi_sq"]/(use.sum() - len(vectorizer.fiducials) - 1),
"model_flux": np.dot(theta, vectorizer(op_labels).T).flatten(),
})
for key in ("ftol", "xtol", "gtol", "maxfev", "factor", "epsfcn"):
meta[key] = kwds[key]
return (op_labels, cov, meta)
#############
# By Jason
def _fit_spectrum_opt2(normalized_flux, normalized_ivar, dispersion, initial_labels,
vectorizer, theta, s2, model_lsf=False, model_redshift=False, **kwargs):
"""
Fit a single spectrum by least-squared fitting.
:param normalized_flux:
The normalized flux values.
:param normalized_ivar:
The inverse variance array for the normalized fluxes.
:param dispersion:
The dispersion (e.g., wavelength) points for the normalized fluxes.
:param initial_labels:
The point(s) to initialize optimization from.
:param vectorizer:
The vectorizer to use when fitting the data.
:param theta:
The theta coefficients (spectral derivatives) of the trained model.
:param s2:
The pixel scatter (s^2) array for each pixel.
:param model_lsf: [optional]
Convolve the spectral model with a Gaussian kernel at fitting time.
:param model_redshift: [optional]
Allow for a residual redshift in the spectral model at fitting time.
"""
adjusted_ivar = normalized_ivar / (1. + normalized_ivar * s2)
adjusted_sigma = np.sqrt(1.0 / adjusted_ivar)
# Exclude non-finite points (e.g., points with zero inverse variance
# or non-finite flux values, but the latter shouldn't exist anyway).
use = np.isfinite(adjusted_sigma * normalized_flux)
N_labels = vectorizer.scales.size
if not np.any(use):
logger.warn("No information in spectrum!")
return (np.nan * np.ones(N_labels), None, {
"fail_message": "Pixels contained no information"})
normalized_flux = normalized_flux[use]
adjusted_sigma = adjusted_sigma[use]
max_abs_velocity = abs(kwargs.get("max_abs_velocity", 10))
    # Check whether the vectorizer has a derivative built in.
if kwargs.get("Dfun", False):
try:
vectorizer.get_label_vector_derivative(vectorizer.fiducials)
except NotImplementedError:
Dfun = None
logger.debug("No label vector derivative available!")
except:
logger.exception("Exception raised when trying to calculate the "
"label vector derivative at the fiducial values:")
raise
else:
# Use the label vector derivative.
"""
# Presumably because of the way leastsq works, the adjusted_inv_sigma
# does not enter here, otherwise we get incorrect results.
Dfun = lambda xdata, l: \
np.dot(theta, vectorizer.get_label_vector_derivative(*l)).T[use]
"""
raise NotImplementedError("requires a thinko")
def Dfun(labels, xdata, ydata, f, adjusted_inv_sigma):
return np.dot(theta,
vectorizer.get_label_vector_derivative(labels)).T[:, use]
else:
Dfun = None
mean_pixel_scale = 1.0 / np.diff(dispersion).mean() # px/Angstrom
# construct theta_x theta_y and theta_z
# construct theta_xyz
# Set the boundary to be 0
one = 0*np.ones(len(theta[0, :]))
row = len(theta[:, 0])
# x
theta_x = np.vstack((theta, one))
theta_x = theta_x[1:row + 1, :]
# y
theta_y = theta
# z
theta_z = np.vstack((one, theta))
theta_z = theta_z[0:row,:]
def f_opt(xdata, *parameters):
y = parameters[N_labels]*np.dot(theta_x, vectorizer(parameters[:N_labels]).T)+parameters[N_labels+1]*np.dot(theta_y, vectorizer(parameters[:N_labels]).T)+parameters[N_labels+2]*np.dot(theta_z, vectorizer(parameters[:N_labels]).T)
y = np.atleast_2d(y)
y = y[:,0]
y = np.atleast_2d(y)
# Convolve?
if model_lsf:
# This will always be the last parameter.
y = gaussian_filter(y, abs(parameters[-4]) * mean_pixel_scale)
# Redshift?
if model_redshift:
index = -2 if model_lsf else -1
v = parameters[index]
if np.abs(v) >= max_abs_velocity:
logger.debug("Returning NaNs because outside of max velocity")
return np.nan * np.ones(sum(use))
y = np.interp(dispersion,
dispersion * (1 + v / 299792.458), y,
left=np.nan, right=np.nan)
return y[use]
kwds = {
"f": f_opt,
"xdata": None,
"ydata": normalized_flux,
"sigma": adjusted_sigma,
"absolute_sigma": True,
# These get passed through to leastsq:
"Dfun": Dfun,
"col_deriv": True,
"ftol": 7. / 3 - 4. / 3 - 1, # Machine precision.
"xtol": 7. / 3 - 4. / 3 - 1, # Machine precision.
"gtol": 0.0,
"maxfev": 100000, # MAGIC
"epsfcn": None,
"factor": 0.1, # Smallest step size available for gradient approximation
"diag": 1.0 / vectorizer.scales
}
# Only update the keywords with things that op.curve_fit/op.leastsq expects.
for key in set(kwargs).intersection(kwds):
if key == "Dfun": continue
kwds[key] = kwargs[key]
results = []
a_all = []
b_all = []
c_all = []
# Jason
"""
# add something to the initial labels
initial_labels = np.atleast_2d(initial_labels)
row = len(initial_labels[:,0])
initial_labels = np.hstack((initial_labels,0*np.ones((row,1))))
initial_labels = np.hstack((initial_labels, 1*np.ones((row, 1))))
initial_labels = np.hstack((initial_labels, 0*np.ones((row, 1))))
"""
print("initial labels")
print(initial_labels)
for p0 in np.atleast_2d(initial_labels):
kwds["p0"] = list(p0)
if model_redshift:
kwds["p0"] += [0]
if model_lsf:
kwds["p0"] += [5] # MAGIC
try:
op_labels, cov = op.curve_fit(**kwds)
# let's separate a,b,c from labels
# The are combined together
print("simultaneously optimize abc and labels")
print(op_labels)
c20 = op_labels
c20_a = cov
except RuntimeError:
logger.exception("Exception in fitting from {}".format(p0))
c20 = initial_labels
c20_a = []
continue
return c20, c20_a
# Let's calculate uncertainty of RVs
"""
a1 = c20[-3]
b1 = c20[-2]
c1 = c20[-1]
J = np.array([-(2. * c1 + b1) / (a1 + b1 + c1) ** 2., (a1 - c1) / (a1 + b1 + c1) ** 2., (2. * a1 + b1) / (a1 + b1 + c1) ** 2.])
gamma_i = cov[3:6,3:6]
print(gamma_i)
c20_a = 4144.68 * (np.dot(np.dot(J, gamma_i), J.T)) ** 0.5
"""
def _fit_pixel(initial_theta, initial_s2, normalized_flux, normalized_ivar,
design_matrix, fixed_scatter, **kwargs):
"""
Return the optimal model coefficients and pixel scatter given the normalized
flux, the normalized inverse variance, and the design matrix.
:param initial_theta:
The initial model coefficients to optimize from.
:param initial_s2:
The initial pixel scatter (s^2) terms to optimize from (if fixed_scatter
is False).
:param normalized_flux:
The normalized flux values for a given pixel, from all stars.
:param normalized_ivar:
The inverse variance of the normalized flux values for a given pixel,
from all stars.
:param design_matrix:
The design matrix for the spectral model.
:param fixed_scatter:
Keep the pixel scatter term fixed.
:returns:
The optimised label vector coefficients and scatter for this pixel, even
if it was supplied by the user.
"""
design_matrix = utils._unpack_value(design_matrix)
    # This initial theta will also be returned if we have no valid fluxes.
    if initial_theta is None:
        initial_theta = np.hstack([1, np.zeros(design_matrix.shape[1] - 1)])
    if np.all(normalized_ivar == 0):
        return np.hstack([initial_theta, initial_s2 if fixed_scatter else 0])
# Optimize the parameters.
kwds = {
"maxiter": np.inf,
"maxfun": np.inf,
"disp": False,
"full_output": True
}
kwds.update(kwargs.get("op_kwargs", {}))
args = (normalized_flux, normalized_ivar, design_matrix)
logger.debug("Optimizer kwds: {}".format(kwds))
    if fixed_scatter:
        # The scatter is passed through args, so use the objective with the
        # (theta, scatter, flux, ivar, design_matrix) signature.
        p0 = initial_theta
        func = _model_pixel
        args = tuple([initial_s2] + list(args))
    else:
        # The scatter is the last entry of the parameter vector being optimized.
        p0 = np.hstack([initial_theta, initial_s2])
        func = _model_pixel_fixed_scatter
op_params, fopt, direc, n_iter, n_funcs, warnflag = op.fmin_powell(
func, p0, args=args, **kwds)
if warnflag > 0:
logger.warning("Warning: {}".format([
"Maximum number of function evaluations made during optimisation.",
"Maximum number of iterations made during optimisation."
][warnflag - 1]))
    return np.hstack([op_params, initial_s2]) if fixed_scatter else op_params
def _model_pixel(theta, scatter, normalized_flux, normalized_ivar,
design_matrix, **kwargs):
inv_var = normalized_ivar/(1. + normalized_ivar * scatter**2)
return model._chi_sq(theta, design_matrix, normalized_flux, inv_var)
def _model_pixel_fixed_scatter(parameters, normalized_flux, normalized_ivar,
design_matrix, **kwargs):
theta, scatter = parameters[:-1], parameters[-1]
return _model_pixel(
theta, scatter, normalized_flux, normalized_ivar, design_matrix)
def _fit_pixel_with_fixed_scatter(scatter, normalized_flux, normalized_ivar,
design_matrix, **kwargs):
"""
Fit the normalized flux for a single pixel (across many stars) given some
pixel variance term, and return the best-fit theta coefficients.
:param scatter:
The additional scatter to adopt in the pixel.
:param normalized_flux:
The normalized flux values for a single pixel across many stars.
:param normalized_ivar:
The inverse variance of the normalized flux values for a single pixel
across many stars.
:param design_matrix:
The design matrix for the model.
"""
theta, ATCiAinv, inv_var = _fit_theta(normalized_flux, normalized_ivar,
scatter**2, design_matrix)
return_theta = kwargs.get("__return_theta", False)
if ATCiAinv is None:
return 0.0 if not return_theta else (0.0, theta)
# We take inv_var back from _fit_theta because it is the same quantity we
# need to calculate, and it saves us one operation.
Q = model._chi_sq(theta, design_matrix, normalized_flux, inv_var)
return (Q, theta) if return_theta else Q
def _fit_theta(normalized_flux, normalized_ivar, s2, design_matrix):
"""
Fit theta coefficients to a set of normalized fluxes for a single pixel.
:param normalized_flux:
The normalized fluxes for a single pixel (across many stars).
:param normalized_ivar:
The inverse variance of the normalized flux values for a single pixel
across many stars.
    :param s2:
        The intrinsic pixel variance (additional scatter squared) to adopt in the pixel.
:param design_matrix:
The model design matrix.
:returns:
The label vector coefficients for the pixel, the inverse variance matrix
and the total inverse variance.
"""
ivar = normalized_ivar/(1. + normalized_ivar * s2)
CiA = design_matrix * np.tile(ivar, (design_matrix.shape[1], 1)).T
try:
ATCiAinv = np.linalg.inv(np.dot(design_matrix.T, CiA))
except np.linalg.linalg.LinAlgError:
#if logger.getEffectiveLevel() == logging.DEBUG: raise
return (np.hstack([1, [0] * (design_matrix.shape[1] - 1)]), None, ivar)
ATY = np.dot(design_matrix.T, normalized_flux * ivar)
theta = np.dot(ATCiAinv, ATY)
return (theta, ATCiAinv, ivar)
|
peraktong/AnniesLasso
|
AnniesLasso/cannon.py
|
Python
|
mit
| 41,385
|
[
"Gaussian"
] |
256a599c45ec32ccbb2b098790723c07a5d26f6bdf406075aeec0d01cdfae0f2
|
# -*- coding: utf-8 -*-
"""
Ottis — A simple bot for Telegram groups that does web and wiki searches.
Author — @karthikeyankc
"""
import telegram
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from bs4 import BeautifulSoup
import logging
import requests
import itertools
import json
import os
import urlparse
updater = Updater(token='YOUR TOKEN HERE')
dispatcher = updater.dispatcher
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',level=logging.INFO)
# Scraper header
headers = {'Accept': 'text/html', 'User-Agent': 'Ottis Telegram Bot 0.0.1'}
'''Helpers'''
#Validate result
def validate_search(query):
wrong_results = ['Images for', 'Videos for', 'News for', 'Maps for']
    for w_res in wrong_results:
        if query.startswith(w_res):
            return False
    return True
''' Start '''
def start(bot, update):
bot.send_message(chat_id=update.message.chat_id, text="I'm Ottis! What menial task can I do for you? For now, I can do two tasks! Type /help to know more!")
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
''' Web search '''
# Search scraper
def websearch(query):
url = "https://www.google.com/search?q="
search_query = query.replace('/search ', '').replace(' ', '%20') # Is there a MessageEntity filter to remove /search?
result_page = requests.get(url+search_query, headers=headers)
soup = BeautifulSoup(result_page.content, 'html.parser')
titles = soup.find_all('h3', {'class':'r'})
links = []
counter = 1
results = []
for title in titles:
links.append(title.find('a'))
for title, link in itertools.izip_longest(titles, links):
title_data = title.text
if counter == 4:
break
        if validate_search(title_data) and link is not None:
link_data = urlparse.urlparse(link.get('href'), allow_fragments=False)
results.append({
'link' : urlparse.parse_qs(link_data.query)['q'][0],
'title' : title_data
})
counter += 1
return results
# Search handler
def search(bot, update):
if update.message.text == "/search":
results = "Search what? You need to add a parameter to the command! Example - '/search the meaning of life'."
else:
search_results = websearch(update.message.text)
results = ""
for result in search_results:
results += "<b>%s</b> - <a href=\"%s\">View Page</a>\n\n" %(result['title'], result['link'])
bot.send_message(chat_id=update.message.chat_id, text=results, parse_mode=telegram.ParseMode.HTML, disable_web_page_preview=True)
search_handler = CommandHandler('search', search)
dispatcher.add_handler(search_handler)
''' Wiki Summary '''
# MW API
def wikisummary(query):
try:
url = "https://en.wikipedia.org/w/api.php?format=json&action=query&prop=extracts&exintro=True&explaintext=True&titles="
search_query = query.replace('/wiki ', '').replace(' ', '%20') # Any filters to remove the command part in a better way?
result_page = requests.get(url+search_query, headers=headers)
j = json.loads(result_page.text)
page_ids = j['query']['pages']
for key, value in page_ids.iteritems():
if len(value['extract']) < 50:
return "Seems there are a lot of Wikis with the same name, try a specific query or visit the disambiguation page for the query here - https://en.wikipedia.org/wiki/%s." %search_query.capitalize()
elif len(value['extract']) >= 4000:
return "The summary is too big to read it here. Read the complete article/page here - https://en.wikipedia.org/wiki/%s." %search_query.capitalize()
else:
return value['extract']
except:
return "I'm sorry! There is some problem with your query! Try using the /search command!"
# Wiki handler
def wiki(bot, update):
summary = wikisummary(update.message.text)
bot.send_message(chat_id=update.message.chat_id, text=summary, disable_web_page_preview=True)
wiki_handler = CommandHandler('wiki', wiki)
dispatcher.add_handler(wiki_handler)
''' Help '''
def help(bot, update):
bot.send_message(chat_id=update.message.chat_id, text="To search the web use /search.\nTo get a summary from Wikis (Only Wikipedia is supported now) use /wiki.")
help_handler = CommandHandler('help', help)
dispatcher.add_handler(help_handler)
''' Start the bot!'''
PORT = int(os.environ.get('PORT', '5000'))
updater.start_webhook(listen='0.0.0.0', port=PORT, url_path='YOUR TOKEN HERE')
updater.bot.setWebhook("https://YOUR APP NAME.herokuapp.com/" + 'YOUR TOKEN HERE')
updater.idle()
|
karthikeyankc/Ottis
|
ottis.py
|
Python
|
gpl-3.0
| 4,446
|
[
"VisIt"
] |
d3304e9561404f338956f45d26989410481b8f7f762403f1654e9761f41b134e
|
from polypasswordhasher import PolyPasswordHasher
THRESHOLD = 10
PASSWORDFILE = 'securepasswords'
def test_1_decode():
# require knowledge of 10 shares to decode others. Create a blank, new
# password file...
pph = PolyPasswordHasher(threshold=THRESHOLD, passwordfile=None)
# create three admins so that any two have the appropriate threshold
pph.create_account('admin', 'correct horse', THRESHOLD / 2)
pph.create_account('root', 'battery staple', THRESHOLD / 2)
pph.create_account('superuser', 'purple monkey dishwasher', THRESHOLD / 2)
# make some normal user accounts...
pph.create_account('alice', 'kitten', 1)
pph.create_account('bob', 'puppy', 1)
pph.create_account('charlie', 'velociraptor', 1)
pph.create_account('dennis', 'menace', 0)
pph.create_account('eve', 'iamevil', 0)
# try some logins and make sure we see what we expect...
assert pph.is_valid_login('alice', 'kitten')
assert pph.is_valid_login('admin', 'correct horse')
assert not pph.is_valid_login('alice', 'nyancat!')
assert pph.is_valid_login('dennis', 'menace')
assert not pph.is_valid_login('dennis', 'password')
# persist the password file to disk
pph.write_password_data(PASSWORDFILE)
def test_2_file():
# let's load it back in
pph = PolyPasswordHasher(threshold=THRESHOLD, passwordfile=PASSWORDFILE)
# The password information is essentially useless alone. You cannot know
# if a password is valid without threshold or more other passwords!!!
try:
pph.is_valid_login('alice', 'kitten')
except ValueError:
pass
else:
print("Can't get here! It's still locked!!!")
# with a threshold (or more) of correct passwords, it decodes and is usable.
pph.unlock_password_data([
('admin', 'correct horse'),
('root', 'battery staple'),
('bob', 'puppy'),
('dennis', 'menace')
])
# now, I can do the usual operations with it...
assert pph.is_valid_login('alice', 'kitten')
pph.create_account('moe', 'tadpole', 1)
pph.create_account('larry', 'fish', 0)
##### TEST PARTIAL VERIFICATION
# require knowledge of 10 shares to decode others. Create a blank, new
# password file...
pph = PolyPasswordHasher(threshold=THRESHOLD, passwordfile=None, partialbytes=2)
# create three admins so that any two have the appropriate threshold
pph.create_account('admin', 'correct horse', THRESHOLD / 2)
pph.create_account('root', 'battery staple', THRESHOLD / 2)
pph.create_account('superuser', 'purple monkey dishwasher', THRESHOLD / 2)
# make some normal user accounts...
pph.create_account('alice', 'kitten', 1)
pph.create_account('bob', 'puppy', 1)
pph.create_account('charlie', 'velociraptor', 1)
pph.create_account('dennis', 'menace', 0)
pph.create_account('eve', 'iamevil', 0)
# try some logins and make sure we see what we expect...
assert pph.is_valid_login('alice', 'kitten')
assert pph.is_valid_login('admin', 'correct horse')
assert not pph.is_valid_login('alice', 'nyancat!')
assert pph.is_valid_login('dennis', 'menace')
assert not pph.is_valid_login('dennis', 'password')
# persist the password file to disk
pph.write_password_data(PASSWORDFILE)
def test_3_partial():
# let's load it back in
pph = PolyPasswordHasher(threshold=THRESHOLD, passwordfile='securepasswords', partialbytes=2)
# The password threshold info should be useful now...
try:
assert pph.is_valid_login('alice', 'kitten')
assert pph.is_valid_login('admin', 'correct horse')
assert not pph.is_valid_login('alice', 'nyancat!')
except ValueError:
print("Partial verification but it is still locked!!!")
try:
pph.create_account('moe', 'tadpole', 1)
except ValueError:
# Should be locked...
pass
else:
print("Partial verification does not allow account creation!")
# with a threshold (or more) of correct passwords, it decodes and is usable.
pph.unlock_password_data([
('admin', 'correct horse'),
('root', 'battery staple'),
('bob', 'puppy'),
('dennis', 'menace')
])
# now, I can do the usual operations with it...
assert pph.is_valid_login('alice', 'kitten')
# including create accounts...
pph.create_account('moe', 'tadpole', 1)
pph.create_account('larry', 'fish', 0)
|
PolyPasswordHasher/PolyPasswordHasher-Python
|
polypasswordhasher/tests/test_polypasswordhasher.py
|
Python
|
mit
| 4,469
|
[
"MOE"
] |
66d3d48ff7afbdfdb4b9ffda027b3467b137b49971c9c6d01495198c7f826553
|
# coding: utf-8
from __future__ import print_function, division, unicode_literals, absolute_import
executable = "anaddb"
from abimkdocs.variables import ValueWithUnit, MultipleValue, Range
#from abipy.abio.abivar_database.variables import ValueWithUnit, MultipleValue, Range, ValueWithConditions
ValueWithConditions = dict
Variable=dict
variables = [
Variable(
abivarname="a2fsmear@anaddb",
varset="anaddb",
vartype="real",
topics=['ElPhonTransport_useful'],
dimensions="scalar",
defaultval=2e-05,
mnemonics="Alpha2F SMEARing factor",
characteristics=['[[ENERGY]]'],
added_in_version="before_v9",
text=r"""
Smearing width for the Eliashberg $\alpha^2$F function (similar to a phonon DOS),
which is sampled on a finite q and k grid. The Dirac delta functions in energy
are replaced by Gaussians of width **a2fsmear** (by default in Hartree).
""",
),
Variable(
abivarname="alphon@anaddb",
varset="anaddb",
vartype="integer",
topics=['nonlinear_basic'],
dimensions="scalar",
defaultval=0,
mnemonics="ALign PHONon mode eigendisplacements",
added_in_version="before_v9",
text=r"""
In case **alphon** is set to 1, ANADDB will compute linear combinations of the
eigendisplacements of modes that are degenerate (twice or three times), in
order to align the mode effective charges along the cartesian axes. This
option is useful in the mode-by-mode decomposition of the electrooptic tensor,
and to compute the Raman susceptibilities of individual phonon modes. In case
of uniaxial crystals, the z-axis should be chosen along the optical axis.
""",
),
Variable(
abivarname="asr@anaddb",
varset="anaddb",
vartype="integer",
topics=['Phonons_basic', 'PhononBands_basic'],
dimensions="scalar",
defaultval=1,
mnemonics="Acoustic Sum Rule",
commentdefault="was 0 before v5.3",
added_in_version="before_v9",
text=r"""
Governs the imposition of the Acoustic Sum Rule (ASR).
* 0 --> no ASR for interatomic force constants is imposed.
* 1 or 2 --> the ASR for interatomic force constants is imposed by modifying
the on-site interatomic force constants, in a symmetric way ( **asr** =2),
or in the more general case, unconstrained way ( **asr** =1).
More detailed explanations: the total energy should be invariant under
translation of the crystal as a whole. This would guarantee that the three
lowest phonon modes at Gamma have zero frequency (Acoustic Sum Rule - ASR).
Unfortunately, the way the DDB is generated (presence of a discrete grid of
points for the evaluation of the exchange-correlation potential and energy)
slightly breaks the translational invariance. Well, in some pathological
cases, the breaking can be rather important.
Two quantities are affected: the interatomic forces (or dynamical matrices),
and the effective charges. The ASR for the effective charges is called the
charge neutrality sum rule, and will be dealt with by the variable
[[anaddb:chneut]]. The ASR for the interatomic forces can be restored, by
modifying the interatomic force of the atom on itself, (called self-IFC), as
soon as the dynamical matrix at Gamma is known. This quantity should be equal
to minus the sum of all interatomic forces generated by all others atoms
(action-reaction law!), which is determined by the dynamical matrix at Gamma.
So, if **asr** is non-zero, the correction to the self-force will be
determined, and the self-force will be imposed to be consistent with the ASR.
This correction will work if IFCs are computed ([[anaddb:ifcflag]]/=0), as
well as if the IFCs are not computed ([[anaddb:ifcflag]]==0). In both cases,
the phonon frequencies will not be the same as the ones determined by the
output of abinit, RF case. If you want to check that the DDB is correct, by
comparing phonon frequencies from abinit and anaddb, you should turn off both
**asr** and [[anaddb:chneut]].
Until now, we have not explained the difference between **asr** =1 and **asr**
=2. This is rather subtle. In some local low-symmetry cases (basically the
effective charges should be anisotropic), when the dipole-dipole contribution
is evaluated and subtracted, the ASR cannot be imposed without breaking the
symmetry of the on-site interatomic forces. That explains why two options are
given: the second case ( **asr** =2, sym) does not entirely impose the ASR,
but simply the part that keeps the on-site interatomic forces symmetric (which
means that the acoustic frequencies do not go to zero exactly), the first case
( **asr** =1, asym) imposes the ASR, but breaks the symmetry. **asr** =2 is to
be preferred for the analysis of the interatomic force constant in real space,
while **asr** =1 should be used to get the phonon band structure.
(NOTE: to confuse the situation even more, the acoustic phonon frequencies
generated by the code for the sym and asym options seem to be exactly the same,
likely due to an extra symmetrisation in the diagonalisation routine. Of course,
when the matrix at Gamma has been generated from IFCs coming from dynamical
matrices none of which is at Gamma, the breaking of the ASR is rather severe.
To clear up the situation, one should use a diagonalisation routine for
non-hermitian matrices. So, at the present state of understanding, one should
always use the **asr** =2 option).
""",
),
Variable(
abivarname="atifc@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_basic'],
dimensions=['[[anaddb:natifc]]'],
defaultval=0,
mnemonics="AToms for IFC analysis",
added_in_version="before_v9",
text=r"""
The actual numbers of the atoms for which the interatomic force constant have
to be written and eventually analysed.
WARNING: there will be an in-place change of the meaning of atifc (this is
confusing, and should be removed in a future version - sorry for this).
""",
),
#FIXME NOTE XG20170811: apparently no effective test for this input variable. Also,
# the description is strange ...!
Variable(
abivarname="band_gap@anaddb",
varset="anaddb",
vartype="real",
topics=['ElPhonTransport_expert'],
dimensions="scalar",
defaultval=999.0,
mnemonics="BAND GAP",
characteristics=['[[ENERGY]]'],
added_in_version="before_v9",
text=r"""
Allow setting the target band gap, in eV. ([[anaddb:elphflag]]=1).
""",
),
Variable(
abivarname="brav@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_useful'],
dimensions="scalar",
defaultval=1,
mnemonics="BRAVais",
added_in_version="before_v9",
text=r"""
Allows one to specify the Bravais lattice of the crystal, in order to help
generate a grid of special q points.
* 1 --> all the lattices (including FCC, BCC and hexagonal)
* 2 --> specific for Face Centered lattices
* 3 --> specific for Body Centered lattices
* 4 --> specific for the Hexagonal lattice
Note that in the latter case, the [[rprim]] of the unit cell has to be
1.0 0.0 0.0
-.5 sqrt(3)/2 0.0
0.0 0.0 1.0
in order for the code to work properly.
Warning: the generation of q-points in anaddb is rather old-fashioned, and
should be replaced by routines used by the main abinit code.
Warning:
The value **brav** = -1 is also possible. It is used for backwards compatibility:
it corresponds to **brav** = 1, but with the weights for the interpolation of the
phonon band structure determined by a different (now obsolete) algorithm instead of
the default **brav** = 1 algorithm based on Wigner-Seitz cells (new as of v8.7).
The default algorithm treats symmetries correctly.
""",
),
Variable(
abivarname="chneut@anaddb",
varset="anaddb",
vartype="integer",
topics=['Phonons_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="Integer for CHarge NEUTrality treatment",
added_in_version="before_v9",
text=r"""
Set the treatment of the Charge Neutrality requirement for the effective charges.
* chneut=0 --> no ASR for effective charges is imposed
* chneut=1 --> the ASR for effective charges is imposed by giving to each atom
an equal portion of the missing charge. See Eq.(48) in [[cite:Gonze1997a]].
* chneut=2 --> the ASR for effective charges is imposed by giving to each atom a portion
of the missing charge proportional to the screening charge already present.
See Eq.(49) in [[cite:Gonze1997a]].
More detailed explanation: the sum of the effective charges in the unit cell
should be equal to zero. It is not the case in the DDB, and this sum rule is
sometimes strongly violated. In particular, this will make the lowest
frequencies at Gamma non-zero. There is no "best" way of imposing the ASR on effective charges.
""",
),
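# For reference, a minimal anaddb input fragment enforcing both sum rules
# discussed above might contain (illustrative values only):
#
#   asr     2
#   chneut  1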
Variable(
abivarname="dieflag@anaddb",
varset="anaddb",
vartype="integer",
topics=['Phonons_basic'],
dimensions="scalar",
defaultval=0,
mnemonics="DIElectric FLAG",
added_in_version="before_v9",
text=r"""
Frequency-dependent dielectric tensor flag.
* 0 --> No dielectric tensor is calculated.
* 1 --> The frequency-dependent dielectric tensor is calculated.
Requirements for preceding response-function DDB generation run: electric-field and full atomic-displacement responses.
Set [[rfstrs]] = 1, 2, or 3 (preferably 3). Set [[rfatpol]] and [[rfdir]] to do a full calculation of phonons at Q=0.
The frequencies are defined by the [[anaddb:nfreq]], [[anaddb:frmin]], [[anaddb:frmax]] variables.
Also, the generalized Lyddane-Sachs-Teller relation will be used as an independent check of the dielectric tensor
at zero frequency (this is done for the directions defined in the phonon list 2; see [[anaddb:nph2l]]).
* 2 --> Only the electronic dielectric tensor is calculated. It corresponds to a zero-frequency homogeneous field,
with quenched atomic positions. For large band gap materials, this quantity is measurable because the
highest phonon frequency is on the order of a few tenths of eV, and the band gap is larger than 5eV.
Requirements for preceding response-function DDB generation: electric-field response.
Set [[rfelfd]] = 1 or 3 (preferably 3). Note that the same information on the electronic dielectric tensor
will be printed in the .out file of the [[rfelfd]] run.
* 3 --> Compute and print the relaxed-ion dielectric tensor.
Requirements for preceding response-function DDB generation run:
electric-field and full atomic-displacement responses. Set [[rfstrs]] = 1, 2, or 3 (preferably 3).
Set [[rfatpol]] and [[rfdir]] to do a full calculation of phonons at Q=0
(needed because the inverse of force-constant tensor is required). Furthermore, in the anaddb input file
the variable [[anaddb:nph2l]] must be nonzero in order to initiate computation of atomic displacements.
If only the dielectric response is needed it is sufficient to set [[anaddb:nph2l]] to 1 and leave [[anaddb:qph2l]]
at its default value (the Gamma point). Note that the relaxed-ion dielectric tensor computed here can also be obtained
as the zero-frequency limit of the frequency-dependent dielectric tensor using input variables **dieflag** =1 and [[anaddb:frmin]]=0.0.
(The results obtained using these two approaches should agree to good numerical precision.)
The ability to compute and print the static dielectric tensor here is provided for completeness
and consistency with the other tensor quantities that are computed in this section of the code.
* 4 --> Calculate dielectric tensor of both relaxed ion and free stress.
We need information of internal strain and elastic tensor (relaxed ion) in this computation.
So please set: [[anaddb:elaflag]]=2,3,4 or 5 and [[anaddb:instrflag]]=1
""",
),
Variable(
abivarname="dipdip@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_basic'],
dimensions="scalar",
defaultval=1,
mnemonics="DIPole-DIPole interaction",
added_in_version="before_v9",
text=r"""
* 0 --> the dipole-dipole interaction is not handled separately in the treatment of the interatomic forces.
This option is available for testing purposes or if effective charge and/or dielectric tensor are not available
in the DDB file. In semiconductors, this option gives results much less accurate than **dipdip** = 1.
Using this value in metals is fine since the interatomic forces are usually short-ranged.
* 1 --> the dipole-dipole interaction is subtracted from the dynamical matrices before Fourier transform,
so that only the short-range part is handled in real space. Of course, it is reintroduced analytically
when the phonon spectrum is interpolated, or if the interatomic force constants have to be analysed in real space.
Note that the Ewald contribution to the dynamical matrix consists of two terms: a G-space sum
and another (expensive) sum in R-space.
See Phys. Rev. B 55, 10355 (1997) [[cite:Gonze1997a]], equations (71) to (75).
This approach is very accurate but the computation of the R-space sum is rather expensive, especially
when one has to interpolate many q-points e.g. for PHDOS computations.
See dipdip -1 option below for a faster algorithm.
* -1 --> Similar to +1 with the difference that the code computes a material-dependent width for
the Gaussians that will hopefully make the Ewald real-space summation unnecessary.
This option is **much faster** than dipdip 1 and is activated by default when
[[anaddb:dipquad]] or [[anaddb:quadquad]] are set to 1.
It is recommended to check that calculations with dipdip = 1 and -1 (both with dipquad = 0 and quadquad = 0)
lead to identical results. Otherwise increase the resolution of the q-point grid and repeat this test.
""",
),
Variable(
abivarname="dipquad@anaddb",
varset="anaddb",
vartype="integer",
topics=['longwave_basic'],
dimensions="scalar",
defaultval=1,
mnemonics="DIPole-QUADdrupole interaction",
characteristics=['[[DEVELOP]]'],
added_in_version="v9",
text=r"""
* 0 --> the dipole-quadrupole interaction is not handled separately in the treatment of the interatomic forces.
* 1 --> the dipole-quadrupole interaction is subtracted from the dynamical matrices before Fourier transform,
so that only the short-range part is handled in real space. Of course, it is reintroduced analytically
when the phonon spectrum is interpolated. Requires a preceding generation of 3rd order DDB with a [[lw_qdrpl]] = 1
or a [[lw_flexo]] = 1 or 2 run.
!!! important
The default value is 1.
This means that the dipole-quadrupole interaction is always included **provided** the DDB file contains
the dynamical quadrupoles.
If the DDB file does not contain the dynamical quadrupoles, this variable is automatically set to zero at runtime.
""",
),
Variable(
abivarname="quadquad@anaddb",
varset="anaddb",
vartype="integer",
topics=['longwave_basic'],
dimensions="scalar",
defaultval=1,
mnemonics="QUADdrupole-QUADdrupole interaction",
characteristics=['[[DEVELOP]]'],
added_in_version="v9",
text=r"""
* 0 --> the quadrupole-quadrupole interaction is not handled separately in the treatment of the interatomic forces.
* 1 --> the quadrupole-quadrupole interaction is subtracted from the dynamical matrices before Fourier transform,
so that only the short-range part is handled in real space. Of course, it is reintroduced analytically
when the phonon spectrum is interpolated. Requires a preceding generation of 3rd order DDB with a [[lw_qdrpl]] = 1
or a [[lw_flexo]] = 1 or 2 run.
!!! important
The default value is 1.
This means that the quadrupole-quadrupole interaction is always included **provided** the DDB file contains
the dynamical quadrupoles.
If the DDB file does not contain the dynamical quadrupoles, this variable is automatically set to zero at runtime.
""",
),
Variable(
abivarname="dosdeltae@anaddb",
varset="anaddb",
vartype="real",
topics=['PhononBands_useful'],
dimensions="scalar",
defaultval="4.5E-06 Hartree = 1 cm$^{-1}$",
mnemonics="DOS DELTA in Energy",
added_in_version="before_v9",
text=r"""
The input variable **dosdeltae** is used to define the step of the frequency
grid used to calculate the phonon density of states when [[anaddb:prtdos]] = 1.
""",
),
Variable(
abivarname="dossmear@anaddb",
varset="anaddb",
vartype="real",
topics=['PhononBands_useful'],
dimensions="scalar",
defaultval="4.5E-05 Hartree = 10 cm$^{-1}$",
mnemonics="DOS SMEARing value",
characteristics=['[[ENERGY]]'],
added_in_version="before_v9",
text=r"""
**dossmear** defines the gaussian broadening used to calculate the phonon
density of states when [[anaddb:prtdos]] = 1.
""",
),
Variable(
abivarname="dossum@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_basic'],
dimensions="scalar",
defaultval=0,
mnemonics="DOS SUM",
added_in_version="before_v9",
text=r"""
Set the flag to 1 to calculate the two-phonon density of states, i.e. the sum
and difference spectra at the Gamma point. The DOS is converged first and, based
on that, the sum and difference spectra are reported in the output file.
""",
),
Variable(
abivarname="dostol@anaddb",
varset="anaddb",
vartype="real",
topics=['PhononBands_useful'],
dimensions="scalar",
defaultval=0.25,
mnemonics="DOS TOLerance",
added_in_version="before_v9",
text=r"""
The relative tolerance on the phonon density of states. This number determines
when the series of grids with which the DOS is calculated can be stopped, i.e.
when the mean relative change in going from one grid to the next, denser grid
is smaller than **dostol**.
""",
),
Variable(
abivarname="eivec@anaddb",
varset="anaddb",
vartype="integer",
topics=['Phonons_useful', 'PhononBands_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="EIgenVECtors",
added_in_version="before_v9",
text=r"""
* 0 --> do not write the phonon eigenvectors;
* 1 or 2 --> write the phonon eigenvectors;
* 4 --> generate output files for band2eps (drawing tool for the phonon band structure);
""",
),
Variable(
abivarname="elaflag@anaddb",
varset="anaddb",
vartype="integer",
topics=['Elastic_basic'],
dimensions="scalar",
defaultval=0,
mnemonics="ELAstic tensor FLAG",
added_in_version="before_v9",
text=r"""
Flag for calculation of elastic and compliance tensors
* 0 --> No elastic or compliance tensor will be calculated.
* 1 --> Only clamped-ion elastic and compliance tensors will be calculated.
Requirements for preceding response-function DDB generation run: Strain perturbation.
Set [[rfstrs]] to 1, 2, or 3. Note that [[rfstrs]]=3 is recommended so that responses
to both uniaxial and shear strains will be computed.
* 2 --> Both relaxed- and clamped-ion elastic and compliance tensors will be calculated,
but only the relaxed-ion quantities will be printed. The input variable [[anaddb:instrflag]] should also be set to 1,
because the internal-strain tensor is needed to compute the relaxed-ion corrections.
Requirements for preceding response-function DDB generation run:
Strain and atomic-displacement responses at Q=0. Set [[rfstrs]] = 1, 2, or 3 (preferably 3).
Set [[rfatpol]] and [[rfdir]] to do a full calculation of phonons at Q=0
(needed because the inverse of force-constant tensor is required).
* 3 --> Both relaxed and clamped-ion elastic and compliance tensors will be printed out.
The input variable [[anaddb:instrflag]] should also be set to 1.
Requirements for preceding response-function DDB generation run: Same as for **elaflag** =2.
* 4 --> Calculate the elastic and compliance tensors (relaxed ion) at fixed displacement field,
the relaxed-ion tensors at fixed electric field will be printed out too, for comparison.
When **elaflag** =4, the information on the internal strain and the relaxed-ion dielectric tensor is needed
to build the whole tensor, so one needs to set [[anaddb:instrflag]]=1 and [[anaddb:dieflag]]=3 or 4.
* 5 --> Calculate the relaxed-ion elastic and compliance tensors, taking into account the stress left inside the cell.
At the same time, the bare relaxed-ion tensors will still be printed out for comparison.
In this calculation, the stress tensor is needed to compute the correction term, so the first-order derivative
database (DDB file) must be merged with the second-order derivative database (DDB file) into a new DDB file
containing both sets of information. The program will also check this for the user.
""",
),
Variable(
abivarname="elph_fermie@anaddb",
varset="anaddb",
vartype="real",
topics=['ElPhonTransport_useful'],
dimensions="scalar",
defaultval=0.0,
mnemonics="ELectron-PHonon FERMI Energy",
characteristics=['[[ENERGY]]'],
added_in_version="before_v9",
text=r"""
If non-zero, artificially fixes the value of the Fermi energy (e.g. for semiconductors)
in the electron-phonon case ([[anaddb:elphflag]] = 1). Note that [[anaddb:elph_fermie]] and
[[anaddb:ep_extrael]] should not be used at the same time.
""",
),
Variable(
abivarname="elphflag@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononWidth_compulsory', 'ElPhonTransport_compulsory'],
dimensions="scalar",
defaultval=0,
mnemonics="ELectron-PHonon FLAG",
added_in_version="before_v9",
text=r"""
If **elphflag** is 1, anaddb performs an analysis of the electron-phonon coupling.
""",
),
Variable(
abivarname="elphsmear@anaddb",
varset="anaddb",
vartype="real",
topics=['ElPhonTransport_useful'],
dimensions="scalar",
defaultval="0.01 Hartree",
mnemonics="ELectron-PHonon SMEARing factor",
characteristics=['[[ENERGY]]'],
added_in_version="before_v9",
text=r"""
Smearing width for the Fermi surface integration (in Hartree by default).
""",
),
Variable(
abivarname="enunit@anaddb",
varset="anaddb",
vartype="integer",
topics=['Phonons_useful', 'PhononBands_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="ENergy UNITs",
added_in_version="before_v9",
text=r"""
Give the units used for the phonon frequency output (in the output file; in
the console log file, Hartree units are always used).
* 0 --> Hartree and cm$^{-1}$;
* 1 --> meV and THz;
* 2 --> Hartree, cm$^{-1}$, meV, THz, and Kelvin.
""",
),
Variable(
abivarname="ep_b_max@anaddb",
varset="anaddb",
vartype="integer",
topics=['ElPhonTransport_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="Electron Phonon integration Band MAXimum",
added_in_version="before_v9",
text=r"""
When set, and [[anaddb:telphint]] is equal to 2, this variable determines the
k-point integration weights which are used in the electron-phonon part of the
code. Instead of weighting according to a distance from the Fermi surface, an
equal weight is given to all k-points, for all bands between
[[anaddb:ep_b_min]] and **ep_b_max**.
""",
),
Variable(
abivarname="ep_b_min@anaddb",
varset="anaddb",
vartype="integer",
topics=['ElPhonTransport_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="Electron Phonon integration Band MINimum",
added_in_version="before_v9",
text=r"""
As for [[anaddb:ep_b_max]], but **ep_b_min** is the lower bound on the band
integration, instead of the upper bound. See also [[anaddb:telphint]].
""",
),
Variable(
abivarname="ep_extrael@anaddb",
varset="anaddb",
vartype="real",
topics=['ElPhonTransport_useful'],
dimensions="scalar",
defaultval=0.0,
mnemonics="Electron-Phonon EXTRA ELectrons",
added_in_version="before_v9",
text=r"""
If non-zero, artificially fixes the number of extra electrons per unit cell
(positive for electron doping), mimicking a doped system (e.g. a
semiconductor), in the electron-phonon case. This field can also be given as a
doping concentration, in units of cm$^{-3}$ (negative for electron
doping). Note that **ep_extrael** and [[anaddb:elph_fermie]] should not be
used at the same time ([[anaddb:elphflag]] = 1).
""",
),
Variable(
abivarname="ep_int_gkk@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononWidth_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="Electron-Phonon INTerpolation of GKK",
added_in_version="before_v9",
text=r"""
This flag determines whether the interpolation of the electron-phonon matrix
elements over the coarse k-grid is done ( **ep_int_gkk** = 1) before summing
with the appropriate Fermi-surface weights. In this way, the two integration
weights are treated symmetrically.
""",
),
Variable(
abivarname="ep_keepbands@anaddb",
varset="anaddb",
vartype="integer",
topics=['ElPhonTransport_basic'],
dimensions="scalar",
defaultval=0,
mnemonics="Electron-Phonon KEEP dependence on electron BANDS",
added_in_version="before_v9",
text=r"""
This flag determines whether the dependence of the electron-phonon matrix
elements on the electron band index is kept ( **ep_keepbands** = 1), or whether
it is summed over immediately with the appropriate Fermi-surface weights. For
transport calculations **ep_keepbands** must be set to 1.
""",
),
Variable(
abivarname="ep_nqpt@anaddb",
varset="anaddb",
vartype="integer",
topics=['ElPhonTransport_expert'],
dimensions="scalar",
defaultval=0,
mnemonics="Electron Phonon Number of Q PoinTs",
added_in_version="before_v9",
text=r"""
In case a non-uniform grid of q-points is being used, for direct calculation
of the electron-phonon quantities without interpolation, this specifies the
number of q-points to be found in the GKK file, independently of the normal anaddb input (ngqpt).
""",
),
Variable(
abivarname="ep_nspline@anaddb",
varset="anaddb",
vartype="integer",
topics=['ElPhonTransport_useful'],
dimensions="scalar",
defaultval=20,
mnemonics="Electron Phonon Number for SPLINE interpolation",
added_in_version="before_v9",
text=r"""
The scale factor for cubic spline interpolation, only used in the relaxation
time approximation ([[anaddb:ifltransport]]=3).
""",
),
Variable(
abivarname="ep_prt_yambo@anaddb",
varset="anaddb",
vartype="integer",
topics=['ElPhonInt_expert'],
dimensions="scalar",
defaultval=0,
mnemonics="Electron Phonon PRinTout YAMBO data",
added_in_version="before_v9",
text=r"""
For electron-phonon calculations, print out matrix elements for use by the yambo code.
""",
),
Variable(
abivarname="ep_qptlist@anaddb",
varset="anaddb",
vartype="real",
topics=['PhononWidth_useful'],
dimensions=[3, '[[anaddb:ep_nqpt]]'],
defaultval="(3*[[anaddb:ep_nqpt]])*0",
mnemonics="Electron Phonon Q PoinT LIST",
added_in_version="before_v9",
text=r"""
In case a non-uniform grid of q-points is being used, for direct calculation
of the electron-phonon quantities without interpolation, this specifies the
q-points to be found in the GKK file, independently of the normal anaddb input
(ngqpt), in reduced coordinates of the reciprocal space lattice.
""",
),
Variable(
abivarname="ep_scalprod@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononWidth_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="DO SCALar PRODuct for gkk matrix elements",
added_in_version="before_v9",
text=r"""
The input variable **ep_scalprod** is a flag determining whether the scalar
product of the electron-phonon matrix elements (gkk) with the phonon
displacement vectors is done before or after interpolation. Doing so before
( **ep_scalprod** = 1) makes the phonon linewidths smoother but introduces an error,
as the interpolated phonons and gkk are not diagonalized in the same basis.
Doing so afterwards ( **ep_scalprod** = 0) eliminates the diagonalization error,
but sometimes gives small spikes in the phonon linewidths near band crossings
or high-symmetry points; the origin of these spikes is not fully understood.
""",
),
Variable(
abivarname="flexoflag@anaddb",
varset="anaddb",
vartype="integer",
topics=['longwave_basic'],
dimensions="scalar",
defaultval=0,
mnemonics="FLEXOelectric tensor FLAG",
characteristics=['[[DEVELOP]]'],
added_in_version="v9",
text=r"""
Flag for the calculation of bulk flexoelectric tensors
* 0 --> No flexoelectric tensor is calculated.
* 1 --> All the contributions to the bulk flexoelectric tensor (clamped-ion, mixed and lattice-mediated) and
related quantities (piezoelectric and flexoelectric internal strain tensors and Lagrange elastic tensors)
are calculated. Requires a preceding generation of 2nd and 3rd order DDB with a [[lw_flexo]] = 1 run.
* 2 --> The clamped-ion flexoelectric tensor is printed. Requires a preceding generation of 2nd and 3rd order
DDB with a [[lw_flexo]] = 1 or 2 run.
* 3 --> The mixed flexoelectric tensor is calculated and printed along with the piezoelectric internal strain tensors.
Requires a preceding generation of 2nd and 3rd order DDB with a [[lw_flexo]] = 1 or 3 run.
* 4 --> The lattice-mediated flexoelectric tensor is calculated and printed along with the piezoelectric and flexoelectric
internal strain tensors and the Lagrange elastic tensors.
Requires a preceding generation of 2nd and 3rd order DDB with a [[lw_flexo]] = 1 or 4 run.
""",
),
Variable(
abivarname="freeze_displ@anaddb",
varset="anaddb",
vartype="real",
topics=['PhononBands_expert'],
dimensions="scalar",
defaultval=0.0,
mnemonics="FREEZE DISPLacement of phonons into supercells",
added_in_version="before_v9",
text=r"""
If different from zero, **freeze_displ** will be used as the amplitude of a
phonon displacement. For each q-point and mode in the [[anaddb:qph1l]] list, a
file will be created containing a supercell of atoms with the corresponding
phonon displacements frozen in. This is typically useful to freeze a soft
phonon mode, then let it relax in abinit afterwards.
**freeze_displ** is unitless (for abinit), but has a physical meaning: it is related to the
Bose distribution $n_B$ and the frequency $\omega_{qs}$ of the phonon mode. At a given
temperature $T$, **freeze_displ** will give the root mean square displacement of
atoms (along with the displacement vectors, which are in *Bohr*). In atomic
units **freeze_displ** = $\sqrt{< \hat{x}^2 >} = \sqrt{(0.5 + n_B(\omega_{qs}/kT))/ \omega_{qs}}$
where $\hat{x}\propto \hat{a} + \hat{a}^\dagger$ is the
displacement operator and $a^\dagger$ and $a$ are the phonon creation and annihilation operators respectively.
Typical values are 50-200 for a frequency of a few hundred cm$^{-1}$ and room temperature.
If all you want is to break the symmetry in the right direction, any reasonable value
(10-50) should be ok.
**WARNING**: this will create a _lot_ of files (3*natom*nph1l), so it should
be used with a small number [[anaddb:nph1l]] of q-points for interpolation.
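For orientation, the following minimal Python sketch (not part of anaddb) evaluates the expression above
in atomic units; the conversion constants are the usual atomic-unit values, and the 200 cm$^{-1}$ / 300 K
inputs are purely illustrative assumptions.

```python
import math

HA_PER_CMM1 = 4.556335e-06   # 1 cm^-1 expressed in Hartree
KB_HA = 3.166812e-06         # Boltzmann constant in Hartree/K

def freeze_displ_estimate(omega_cm1, temp_k):
    # sqrt((0.5 + n_B) / omega) in atomic units, following the formula above
    omega = omega_cm1 * HA_PER_CMM1
    n_b = 1.0 / math.expm1(omega / (KB_HA * temp_k))   # Bose-Einstein occupation
    return math.sqrt((0.5 + n_b) / omega)

# A 200 cm^-1 mode at 300 K gives a value of a few tens, in line with the
# typical range quoted above.
print(round(freeze_displ_estimate(200.0, 300.0), 1))
```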
""",
),
Variable(
abivarname="frmax@anaddb",
varset="anaddb",
vartype="real",
topics=['Phonons_useful'],
dimensions="scalar",
defaultval=10.0,
mnemonics="FRequency MAXimum",
added_in_version="before_v9",
text=r"""
Value of the largest frequency for the frequency-dependent dielectric tensor, in Hartree.
""",
),
Variable(
abivarname="frmin@anaddb",
varset="anaddb",
vartype="real",
topics=['Phonons_useful'],
dimensions="scalar",
defaultval=0.0,
mnemonics="FRequency MINimum",
added_in_version="before_v9",
text=r"""
Value of the lowest frequency for the frequency-dependent dielectric tensor, in Hartree.
""",
),
Variable(
abivarname="gkqwrite@anaddb",
varset="anaddb",
vartype="integer",
topics=['ElPhonInt_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="GKk for input Q grid to be WRITtEn to disk",
added_in_version="before_v9",
text=r"""
Flag to write out the reciprocal space matrix elements to a disk file named
gkqfile. This strongly reduces the memory needed for an electron-phonon run.
""",
),
Variable(
abivarname="gruns_ddbs@anaddb",
varset="anaddb",
vartype="string",
topics=['Temperature_useful'],
dimensions=['[[anaddb:gruns_nddbs]]'],
defaultval="Empty",
mnemonics="GRUNeiSen DDBS",
added_in_version="before_v9",
text=r"""
List of strings with the paths of the DDB files used for the calculation of
the Gruneisen parameters. Each string must be enclosed by quotation marks. The
number of DDB files is defined by [[anaddb:gruns_nddbs]] (possible values are
3, 5, 7, 9). The DDB files correspond to phonon calculations performed at
different volumes (usually ± 1% of the equilibrium volume). The DDB files must
be ordered according to the volume of the unit cell (the DDB with the smallest
volume comes first) and the volume increment must be constant. The code
computes the derivative of the dynamical matrix with respect to the volume using central finite differences.
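As an illustration of the central finite-difference idea, here is a minimal sketch; the function name
and the quantity being differentiated are placeholders, not the anaddb internals.

```python
import numpy as np

def central_derivative(values, volumes):
    # Lowest-order central finite difference at the middle (equilibrium) volume,
    # for an odd number of equally spaced volumes (3, 5, 7 or 9 points).
    values = np.asarray(values, dtype=float)
    volumes = np.asarray(volumes, dtype=float)
    dv = volumes[1] - volumes[0]   # constant volume increment
    mid = len(values) // 2
    return (values[mid + 1] - values[mid - 1]) / (2.0 * dv)

# e.g. one phonon frequency computed at V0*(1 - 0.01), V0 and V0*(1 + 0.01)
```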
""",
),
Variable(
abivarname="gruns_nddbs@anaddb",
varset="anaddb",
vartype="integer",
topics=['Temperature_basic'],
dimensions="scalar",
defaultval=0,
mnemonics="GRUNeiSen Number of DDB files",
added_in_version="before_v9",
text=r"""
This variable defines the number of DDB files (read from [[anaddb:gruns_ddbs]])
used for the calculation of the Gruneisen parameters.
""",
),
Variable(
abivarname="iatfix@anaddb",
varset="anaddb",
vartype="integer",
topics=['ConstrainedPol_useful'],
dimensions=['[[anaddb:natfix]]'],
defaultval=0,
mnemonics="Indices of the AToms that are FIXed",
added_in_version="before_v9",
text=r"""
Indices of the atoms that are fixed during a structural relaxation at
constrained polarization. See [[anaddb:polflag]].
""",
),
Variable(
abivarname="iatprj_bs@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_useful'],
dimensions=['[[anaddb:natprj_bs]]'],
defaultval="0*'[[anaddb:natprj_bs]]'",
mnemonics="Indices of the AToms for the PRoJection of the phonon Band Structure",
added_in_version="before_v9",
text=r"""
Indices of the atoms that are chosen for projection of the phonon
eigenvectors, giving a weighted phonon band structure file.
""",
),
Variable(
abivarname="ifcana@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="IFC ANAlysis",
added_in_version="before_v9",
text=r"""
* 0 --> no analysis of interatomic force constants;
* 1 --> analysis of interatomic force constants.
If the analysis is activated, one gets the trace of the matrices between pairs
of atoms; if [[anaddb:dipdip]] is 1, one also gets the trace of the short-range and
electrostatic parts, and the ratio with the full matrix is calculated. A
local coordinate reference is then defined (using the next-neighbour coordinates), and
the interatomic force constant matrix between pairs of atoms is expressed in that
local coordinate reference (the first vector is along the bond; the second
vector is along the perpendicular force exerted on the generic atom by a
longitudinal displacement of the neighbouring atom, in case it does not
vanish; the third vector is perpendicular to the two others). Ratios with
respect to the longitudinal force constant (the (1,1) element of
the matrix in local coordinates) are also calculated.
""",
),
Variable(
abivarname="ifcflag@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_compulsory'],
dimensions="scalar",
defaultval=0,
mnemonics="Interatomic Force Constants FLAG",
added_in_version="before_v9",
text=r"""
* 0 --> do all calculations directly from the DDB, without the use of the interatomic force constants.
* 1 --> calculate and use the interatomic force constants for interpolating the phonon spectrum
and dynamical matrices at every q wavevector, and eventually analyse the interatomic force constants,
according to the information given by [[anaddb:atifc]], [[anaddb:dipdip]], [[anaddb:ifcana]], [[anaddb:ifcout]],
[[anaddb:natifc]], [[anaddb:nsphere]] and [[anaddb:rifcsph]].
More detailed explanation: if the dynamical matrices are known on a regular
set of wavevectors, they can be used to get the interatomic force constants, which are
simply their Fourier transform. When the non-analyticities have been removed by the
use of the effective charges at Gamma (option offered by setting [[anaddb:dipdip]] to 1),
the interatomic force constants are known to decay rather fast in real space.
The interatomic force constants generated from a small set of dynamical matrices can
therefore be of sufficient range to allow the remaining ones to be
neglected. This gives a practical way to interpolate the content of a small
set of dynamical matrices, because dynamical matrices can be generated everywhere
starting from this set of interatomic force constants. It is
suggested to always use **ifcflag** = 1. The **ifcflag** = 0 option is available
for checking purposes, and for cases where there is not enough information in the DDB.
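Schematically, the re-synthesis of the dynamical matrix from real-space force constants can be pictured
with the toy sketch below (a bare Fourier sum; the actual anaddb implementation also treats the long-range
dipole-dipole part separately when [[anaddb:dipdip]] = 1, which is not shown here).

```python
import numpy as np

def interpolate_dynmat(ifc, qpt):
    # Toy Fourier interpolation: D(q) = sum_R C(R) exp(2*pi*i q.R).
    # ifc maps a lattice vector R (tuple of reduced coordinates) to its
    # real-space force-constant block; qpt is in reduced coordinates.
    dynmat = None
    for rvec, c_r in ifc.items():
        phase = np.exp(2j * np.pi * np.dot(qpt, rvec))
        term = np.asarray(c_r, dtype=complex) * phase
        dynmat = term if dynmat is None else dynmat + term
    return dynmat
```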
""",
),
Variable(
abivarname="ifcout@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="IFC OUTput",
added_in_version="before_v9",
text=r"""
For each atom in the list [[anaddb:atifc]] (generic atoms), **ifcout** gives
the number of neighbouring atoms for which the IFCs will be output (written)
and eventually analysed. The neighbouring atoms are selected by decreasing
distance with respect to the generic atom.
""",
),
Variable(
abivarname="ifltransport@anaddb",
varset="anaddb",
vartype="integer",
topics=['ElPhonTransport_basic'],
dimensions="scalar",
defaultval=0,
mnemonics="IFLag for TRANSPORT",
added_in_version="before_v9",
text=r"""
If **ifltransport** = 1 (LOVA) or **ifltransport** = 2 (non-LOVA), anaddb calculates the
transport properties, i.e. the electrical and thermal resistivities from electron-
phonon interactions in the variational approach. If **ifltransport** = 3, anaddb
calculates the k-dependent relaxation time. (Requires [[anaddb:elphflag]] = 1.)
""",
),
Variable(
abivarname="instrflag@anaddb",
varset="anaddb",
vartype="integer",
topics=['Elastic_basic'],
dimensions="scalar",
defaultval=0,
mnemonics="INternal STRain FLAG",
added_in_version="before_v9",
text=r"""
Internal strain tensor flag.
* 0 --> No internal-strain calculation.
* 1 --> Print out both force-response and displacement-response internal-strain tensor.
Requirements for preceding response-function DDB generation run: Strain and full atomic-displacement responses.
Set [[rfstrs]] = 1, 2, or 3 (preferably 3). Set [[rfatpol]] and [[rfdir]] to do a full calculation of phonons at Q=0.
""",
),
Variable(
abivarname="istrfix@anaddb",
varset="anaddb",
vartype="integer",
topics=['ConstrainedPol_useful'],
dimensions=['[[anaddb:nstrfix]]'],
defaultval=0,
mnemonics="Index of STRain FIXed",
added_in_version="before_v9",
text=r"""
Indices of the elements of the strain tensor that are fixed during a
structural relaxation at constrained polarisation.
See [[anaddb:polflag]].
""",
),
Variable(
abivarname="kptrlatt@anaddb",
varset="anaddb",
vartype="integer",
topics=['ElPhonTransport_basic', 'PhononWidth_basic'],
dimensions=[3, 3],
defaultval="9*0",
mnemonics="K PoinT Reciprocal LATTice",
added_in_version="before_v9",
text=r"""
Unnormalized lattice vectors for the k-point grid in reciprocal space (see
[[kptrlatt]] abinit variable definition as well). Input needed in electron-phonon
calculations using nesting functions or tetrahedron integration.
""",
),
Variable(
abivarname="kptrlatt_fine@anaddb",
varset="anaddb",
vartype="integer",
topics=['ElPhonTransport_expert'],
dimensions=[3, 3],
defaultval="9*0",
mnemonics="K PoinT Reciprocal LATTice for FINE grid",
added_in_version="before_v9",
text=r"""
As kptrlatt above, but for a finer grid of k-points. Under development.
Does not work yet, as of |today|.
""",
),
Variable(
abivarname="mustar@anaddb",
varset="anaddb",
vartype="real",
topics=['ElPhonTransport_useful'],
dimensions="scalar",
defaultval=0.1,
mnemonics="MU STAR",
added_in_version="before_v9",
text=r"""
Average electron-electron interaction strength, for the computation of the
superconducting Tc using McMillan's formula.
""",
),
Variable(
abivarname="natfix@anaddb",
varset="anaddb",
vartype="integer",
topics=['ConstrainedPol_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="Number of AToms FIXed",
added_in_version="before_v9",
text=r"""
Number of atoms that are fixed during a structural optimisation at constrained
polarization. See [[anaddb:polflag]].
""",
),
Variable(
abivarname="natifc@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="Number of AToms for IFC analysis",
added_in_version="before_v9",
text=r"""
Give the number of atoms for which IFCs are written and eventually analysed.
The list of these atoms is provided by [[anaddb:atifc]].
""",
),
Variable(
abivarname="natprj_bs@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="Number of AToms for PRoJection of the Band Structure",
added_in_version="before_v9",
text=r"""
Give the number of atoms for which atomic-projected phonon band structures
will be output. The list of these atoms is provided by [[anaddb:iatprj_bs]].
""",
),
Variable(
abivarname="nchan@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_expert'],
dimensions="scalar",
defaultval=800,
mnemonics="Number of CHANnels",
added_in_version="before_v9",
text=r"""
The number of channels of width 1 cm$^{-1}$ used in calculating the phonon density
of states through the histogram method, or, equivalently, the largest
frequency sampled. The first channel begins at 0.
""",
),
Variable(
abivarname="ndivsm@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_basic', 'PhononWidth_useful'],
dimensions="scalar",
defaultval=20,
mnemonics="Number of DIVisions for the SMallest segment",
added_in_version="before_v9",
text=r"""
This variable defines the number of divisions used to sample the smallest
segment of the q-path used for the phonon band structure. If ndivsm is
specified in the input file, the code will automatically generate the points
along the path using the coordinates given in the array [[anaddb:qpath]].
""",
),
Variable(
abivarname="nfreq@anaddb",
varset="anaddb",
vartype="integer",
topics=['Phonons_useful'],
dimensions="scalar",
defaultval=1,
mnemonics="Number of FREQuencies",
added_in_version="before_v9",
text=r"""
Number of frequencies wanted for the frequency-dependent dielectric tensor.
Should be positive. See [[anaddb:dieflag]]. The code will take **nfreq**
equidistant values from [[anaddb:frmin]] to [[anaddb:frmax]].
""",
),
Variable(
abivarname="ng2qpt@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_useful'],
dimensions=[3],
defaultval="3*0",
mnemonics="Number of Grids points for Q PoinTs (grid 2)",
added_in_version="before_v9",
text=r"""
The Monkhorst-Pack grid linear dimensions, for the finer of the series of fine grids.
Used for the integration of thermodynamical functions (Bose-Einstein distribution) or for the DOS.
""",
),
Variable(
abivarname="ngqpt@anaddb",
varset="anaddb",
vartype="integer",
topics=['Phonons_basic', 'PhononBands_basic'],
dimensions=[3],
defaultval="3*0",
mnemonics="Number of Grids points for Q PoinTs",
added_in_version="before_v9",
text=r"""
The Monkhorst-Pack grid linear dimensions (coarse grid).
Should correspond to the grid of points available in the DDB or to a sub-grid.
""",
),
Variable(
abivarname="ngrids@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_expert'],
dimensions="scalar",
defaultval=4,
mnemonics="Number of GRIDS",
added_in_version="before_v9",
text=r"""
This number defines the series of grids that will be used for the estimation of
the phonon DOS. The coarsest will be tried first, then the next, ..., up to the
one described by [[anaddb:ng2qpt]]. The intermediate grids are defined, for
igrid = 1 ... **ngrids**, by ngqpt_igrid(ii) = (ng2qpt(ii)*igrid)/**ngrids**.
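For example, assuming the division in the expression above is an integer (Fortran-style) division, the
series of grids can be sketched as follows (the [8, 8, 8] input is only an illustrative value):

```python
def grid_series(ng2qpt, ngrids):
    # Intermediate q-point grids ngqpt_igrid(ii) = (ng2qpt(ii)*igrid)/ngrids
    # for igrid = 1 ... ngrids, assuming integer division.
    return [[(n * igrid) // ngrids for n in ng2qpt] for igrid in range(1, ngrids + 1)]

# e.g. grid_series([8, 8, 8], 4) --> [[2, 2, 2], [4, 4, 4], [6, 6, 6], [8, 8, 8]]
```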
""",
),
Variable(
abivarname="nlflag@anaddb",
varset="anaddb",
vartype="integer",
topics=['nonlinear_compulsory'],
dimensions="scalar",
defaultval=0,
mnemonics="Non-Linear FLAG",
added_in_version="before_v9",
text=r"""
Non-linear properties flag.
* 0 --> do not compute non-linear properties ;
* 1 --> the electrooptic tensor, Raman susceptibilities and non-linear optical susceptibilities are calculated;
* 2 --> only the non-linear optical susceptibilities and first-order changes of the dielectric tensor
induced by an atomic displacement are calculated;
* 3 --> only the non-linear optical susceptibility is calculated.
""",
),
Variable(
abivarname="nph1l@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_basic'],
dimensions="scalar",
defaultval=0,
mnemonics="Number of PHonons in List 1",
added_in_version="before_v9",
text=r"""
The number of wavevectors in phonon list 1, used for interpolation of the
phonon frequencies. The values of these wavevectors will be specified by
[[anaddb:qph1l]]. The dynamical matrix for these wavevectors, obtained either
directly from the DDB (if [[anaddb:ifcflag]] = 0) or through the interatomic
force constant interpolation (if [[anaddb:ifcflag]] = 1), will be diagonalized, and
the corresponding eigenfrequencies will be printed.
""",
),
Variable(
abivarname="nph2l@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_basic'],
dimensions="scalar",
defaultval=0,
mnemonics="Number of PHonons in List 2",
added_in_version="before_v9",
text=r"""
The number of wavevectors in phonon list 2, defining the directions along
which the non-analytical splitting of phonon frequencies at Gamma will be
calculated. The actual values of the wavevector directions will be specified
by [[anaddb:qph2l]]. These are actually all wavevectors at Gamma, but obtained
as a limit along different directions in the Brillouin zone. It is important
to note that non-analyticities are present in the dynamical matrices at Gamma,
due to the long-range Coulomb forces, so that approaching Gamma along different
directions can give different results.
The wavevectors in list 2 will be used to:
* generate and diagonalize a dynamical matrix, and print the corresponding eigenvalues.
* calculate the generalized Lyddane-Sachs-Teller relation. Note that if the
three first numbers are zero, then the code will do a calculation at Gamma without non-analyticities.
""",
),
Variable(
abivarname="nqpath@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_basic', 'PhononWidth_basic'],
dimensions="scalar",
defaultval=0,
mnemonics="Number of Q wavevectors defining a PATH",
added_in_version="before_v9",
text=r"""
Number of q-points in the array [[anaddb:qpath]] defining the path along which
the phonon band structure and phonon linewidths are interpolated.
""",
),
Variable(
abivarname="nqshft@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_useful'],
dimensions="scalar",
defaultval=1,
mnemonics="Number of Q SHiFTs",
added_in_version="before_v9",
text=r"""
The number of vector shifts of the simple Monkhorst-Pack grid, needed to
generate the coarse grid of q points (for the series of fine grids, the number
of shifts is always taken to be 1). Usually, set it to 1. Use 2 for BCC
sampling (Warning: not BCC lattice, BCC *sampling*), and 4 for FCC sampling
(Warning: not FCC lattice, FCC *sampling*).
""",
),
Variable(
abivarname="nsphere@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="Number of atoms in SPHERe",
added_in_version="before_v9",
text=r"""
Number of atoms included in the cut-off sphere for the interatomic force constants;
see also the alternative [[anaddb:rifcsph]]. If **nsphere** = 0: maximum
extent allowed by the grid. If **nsphere** = -1: the code analyzes different
values of nsphere and finds the value that does not lead to unstable
frequencies in a small sphere around Gamma. The truncated IFCs are then used
for further post-processing. The results of the test are reported in the main
output file. This option is useful to obtain an initial guess for nsphere: the
value that leads to stable frequencies and gives a linear dispersion for the
acoustic modes around Gamma is usually smaller than the one reported by **nsphere** = -1.
This number defines the atoms for which the short-range part of the
interatomic force constants, after imposition of the acoustic sum rule, will
not be put to zero. This option is available for testing purposes (to evaluate
the range of the interatomic force constants), because the acoustic sum rule
will be violated if some atoms are no longer included in the inverse Fourier transform.
""",
),
Variable(
abivarname="nstrfix@anaddb",
varset="anaddb",
vartype="integer",
topics=['ConstrainedPol_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="Number of STRain components FIXed",
added_in_version="before_v9",
text=r"""
Number of strain components that are fixed during a structural optimisation at
constrained polarization. See [[anaddb:polflag]].
""",
),
Variable(
abivarname="ntemper@anaddb",
varset="anaddb",
vartype="integer",
topics=['Temperature_basic'],
dimensions="scalar",
defaultval=10,
mnemonics="Number of TEMPERatures",
added_in_version="before_v9",
text=r"""
Number of temperatures at which the thermodynamical quantities have to be
evaluated. Now also used for the output of transport quantities in electron-
phonon calculations. The full grid is specified with the [[anaddb:tempermin]]
and [[anaddb:temperinc]] variables. The default temperature grid goes from
100 K to 1000 K in steps of 100 K. At the largest temperatures, for most solids,
anharmonic effects not accounted for in the harmonic approximation implemented in
anaddb will be important. For weakly bound systems (e.g. Van der Waals
solids), such anharmonic effects might be important already at room temperature.
""",
),
Variable(
abivarname="nwchan@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_useful'],
dimensions="scalar",
defaultval=10,
mnemonics="Number of Widths of CHANnels",
added_in_version="before_v9",
text=r"""
The width of the largest channel used to sample the frequencies. The
code will generate different sets of channels, with decreasing widths (in steps
of 1 cm$^{-1}$), from this channel width down to 1. The calculation is considered
converged when the convergence criteria based on [[anaddb:dostol]] and
[[anaddb:thmtol]] have been fulfilled.
""",
),
Variable(
abivarname="outboltztrap@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_expert'],
dimensions="scalar",
defaultval=0,
mnemonics="OUTput files for BOLTZTRAP code",
added_in_version="before_v9",
text=r"""
If set to 1, the phonon frequencies on the [[anaddb:ngqpt]] grid are output in a format
readable by the BoltzTraP code, which does band interpolation and computes group
velocities. The name of the output file is given the suffix _BTRAP.
""",
),
Variable(
abivarname="outscphon@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_expert'],
dimensions="scalar",
defaultval=0,
mnemonics="OUTput files for Self Consistent PHONons",
added_in_version="before_v9",
text=r"""
If set to 1, the phonon frequency and eigenvector files needed for a Self
Consistent phonon run (as in [[cite:Souvatzis2008]]) will be output to
files appended _PHFRQ and _PHVEC. The third file needed is appended _PCINFO
for Primitive Cell INFOrmation.
""",
),
Variable(
abivarname="piezoflag@anaddb",
varset="anaddb",
vartype="integer",
topics=['Elastic_basic'],
dimensions="scalar",
defaultval=0,
mnemonics="PIEZOelectric tensor FLAG",
added_in_version="before_v9",
text=r"""
Flag for calculation of piezoelectric tensors
* 0 --> No piezoelectric tensor will be calculated.
* 1 --> Only the clamped-ion piezoelectric tensor is computed and printed.
Requirements for preceding response-function DDB generation run: Strain and electric-field responses.
For the electric-field part, one needs results from a prior 'ddk perturbation' run.
Note that even if only a limited number of piezoelectric tensor terms are wanted
(as determined by [[rfstrs]] and [[rfdir]] in this calculation) it is necessary to set [[rfdir]] = 1 1 1
in the d/dk calculation for most structures. The only obvious exception to this requirement
is cases in which the primitive lattice vectors are all aligned with the cartesian axes.
The code will omit terms in the output piezoelectric tensor for which the available d/dk set is incomplete.
Thus: Set [[rfstrs]] to 1, 2, or 3 (preferably 3)
* 2 --> Both the relaxed-ion and clamped-ion piezoelectric tensors will be calculated,
but only the relaxed-ion quantities will be printed. The input variable [[anaddb:instrflag]]
should also be set to 1, because the internal-strain tensor is needed to compute the relaxed-ion corrections.
Requirements for preceding response-function DDB generation run: Strain, electric-field and full atomic-displacement
responses at Q=0. Set [[rfstrs]] = 1, 2, or 3 (preferably 3). Set [[rfelfd]] = 3.
Set [[rfatpol]] and [[rfdir]] to do a full calculation of phonons at Q=0
(needed because the inverse of force-constant tensor is required).
* 3 --> Both relaxed and clamped-ion piezoelectric tensors will be printed out.
The input variable [[anaddb:instrflag]] should also be set to 1.
Requirements for preceding response-function DDB generation run: Same as for **piezoflag** =2.
* 4 --> Calculate the piezoelectric d tensor (relaxed ion). In order to calculate the piezoelectric d tensor,
the internal strain and the relaxed-ion elastic tensor are needed,
so one should set [[anaddb:elaflag]] = 2, 3, 4, or 5 and [[anaddb:instrflag]] = 1.
The subroutine will also do a check, and print a warning message without stopping, even if the flags were not correctly set.
* 5 --> Calculate the piezoelectric g tensor (relaxed ion). In this computation,
the internal strain, the relaxed-ion elastic tensor and the relaxed-ion dielectric tensor are needed,
so one should set [[anaddb:instrflag]] = 1, [[anaddb:elaflag]] = 2, 3, 4 or 5, and [[anaddb:dieflag]] = 3 or 4.
The subroutine will also do a check, and print a warning message without stopping, even if the flags were not correctly set.
* 6 --> Calculate the piezoelectric h tensor (relaxed ion). In this calculation, the internal strain
and the relaxed-ion dielectric tensor are needed, so one should set [[anaddb:instrflag]] = 1
and [[anaddb:dieflag]] = 3 or 4. The subroutine will also do a check, and print a warning message
without stopping, even if the flags were not correctly set.
* 7 --> Calculate all the possible piezoelectric tensors, including the e (clamped and relaxed ion), d, g and h tensors.
The flags should be set so as to satisfy the above rules 1 to 6.
""",
),
Variable(
abivarname="polflag@anaddb",
varset="anaddb",
vartype="integer",
topics=['ConstrainedPol_compulsory'],
dimensions="scalar",
defaultval=0,
mnemonics="POLarization FLAG",
added_in_version="before_v9",
text=r"""
If activated, compute polarization in cartesian coordinates, and update
lattice constants and atomic positions in order to perform a structural
optimization at constrained polarization.
More detailed explanation: ANADDB can use the formalism described in
[[cite:Sai2002]] to perform structural relaxations under the
constraint that the polarization is equal to a value specified by the input
variable [[anaddb:targetpol]]. The user starts from a given configuration of a
crystal and performs a ground-state calculation of the Hellmann-Feynman forces
and stresses and the Berry-phase polarization, as well as a linear-response
calculation of the whole matrix of second-order energy derivatives with
respect to atomic displacements, strains and electric field.
In case **polflag** = 1, ANADDB solves the linear system of equations (13) in
[[cite:Sai2002]], and computes new atomic positions (if [[anaddb:relaxat]] = 1)
and lattice constants (if [[anaddb:relaxstr]] = 1). Then, the user uses these
parameters to perform a new ground-state and linear-response calculation. This
must be repeated until convergence is reached. The user can also fix some
atomic positions, or strains, using the input variables [[anaddb:natfix]],
[[anaddb:nstrfix]], [[anaddb:iatfix]] and [[anaddb:istrfix]].
In case both [[anaddb:relaxat]] and [[anaddb:relaxstr]] are 0, while
**polflag** =1, ANADDB only computes the polarization in cartesian coordinates.
As described in [[cite:Sai2002]], it is important to use the finite
difference expression of the ddk ([[berryopt]]=2 or -2) in the linear response
calculation of the effective charges and the piezoelectric tensor.
""",
),
Variable(
abivarname="prt_ifc@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="PRinT the Interatomic Force Constants",
added_in_version="before_v9",
text=r"""
Flag to print out the Interatomic Force Constants in real space to a file.
The available options are:
* 0 --> do nothing (IFC are printed to the log file);
* 1 --> write out the IFC in file ifcinfo.out (the name is fixed) to be used by AI2PS from John Rehr's group
""",
),
Variable(
abivarname="prtbltztrp@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_expert'],
dimensions="scalar",
defaultval=0,
mnemonics="PRinT input files for BoLTZTRaP code.",
added_in_version="before_v9",
text=r"""
* 0 --> do not write the BoltzTraP input files;
* 1 --> write out the input files for BoLTZTRaP code.
""",
),
Variable(
abivarname="prtddb@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="PRinT the Derivative DataBase files",
added_in_version="before_v9",
text=r"""
Flag to print out the DDB file interpolated with the Interatomic Force Constants.
The available options are:
* 0 --> no output of DDB;
* 1 --> Interpolate the DDB and write out the DDB and DDB.nc files.
""",
),
Variable(
abivarname="prtdos@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_basic'],
dimensions="scalar",
defaultval=0,
mnemonics="PRinT the phonon Density Of States",
added_in_version="before_v9",
text=r"""
The **prtdos** variable is used to calculate the phonon density of states,
PHDOS, by Fourier interpolating the interatomic force constants on the (dense)
q-mesh defined by [[anaddb:ng2qpt]]. Note that the variable [[anaddb:ifcflag]]
must be set to 1 since the interatomic force constants are supposed to be known.
The available options are:
* 0 --> no output of PHDOS (default);
* 1 --> calculate PHDOS using the gaussian method and the broadening defined by [[anaddb:dossmear]].
* 2 --> calculate PHDOS using the tetrahedron method.
The step of the frequency grid employed to calculate the DOS can be defined
through the input variable [[anaddb:dosdeltae]].
""",
),
Variable(
abivarname="prtfsurf@anaddb",
varset="anaddb",
vartype="integer",
topics=['ElPhonTransport_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="PRinT the Fermi SURFace",
added_in_version="before_v9",
text=r"""
Only for electron-phonon calculations. The available options are:
* 0 --> do not write the Fermi Surface;
* 1 --> write out the Fermi Surface in the BXSF format used by [XCrySDen](http://www.xcrysden.org).
Further comments:
a) Only the eigenvalues for k-points inside the Irreducible Brillouin zone are
required. As a consequence it is possible to use [[kptopt]] =1 during the GS
calculation to reduce the computational effort.
b) Only unshifted k-grids that are orthogonal in reduced space are supported
by [XCrySDen](http://www.xcrysden.org). This implies that [[shiftk]] must be
set to (0,0,0) during the GS calculation with [[nshiftk]]=1. Furthermore if
[[kptrlatt]] is used to generate the k-grid, all the off-diagonal elements of
this array must be zero.
""",
),
Variable(
abivarname="prtmbm@anaddb",
varset="anaddb",
vartype="integer",
topics=['nonlinear_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="PRinT Mode-By-Mode decomposition of the electrooptic tensor",
added_in_version="before_v9",
text=r"""
* 0 --> do not write the mode-by-mode decomposition of the electrooptic tensor;
* 1 --> write out the contribution of the individual zone-center phonon modes to the electrooptic tensor.
""",
),
Variable(
abivarname="prtnest@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononWidth_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="PRinT the NESTing function",
added_in_version="before_v9",
text=r"""
Only for electron-phonon calculations. This input variable is used to
calculate the nesting function defined as:
$$
\chi_{nm}(q) = \sum_k\delta(\epsilon_{k,n}-\epsilon_F) \delta(\epsilon_{k+q,m}-\epsilon_F).
$$
The nesting factor is calculated for every point of the k-grid employed during the
previous GS calculation. The values are subsequently interpolated along the
trajectory in q space defined by [[anaddb:qpath]], and written in the _NEST
file using the X-Y format ( **prtnest** =1). It is also possible to analyze
the behavior of the function in the reciprocal unit cell saving the values in
the NEST_XSF file that can be read using [XCrySDen](http://www.xcrysden.org) (**prtnest** =2).
Note that in the present implementation what is really
printed to file is the "total nesting" defined as $\sum_{nm} \chi_{nm}(q)$.
Limitations: the k-grid defined by [[kptrlatt]] must be orthogonal in
reciprocal space, moreover off-diagonal elements are not allowed, i.e. [[kptrlatt]]
4 0 0 0 4 0 0 0 4 is fine while [[kptrlatt]] = 1 0 0 0 1 1 0 -1 1 will not work.
* 0 --> do not write the nesting function;
* 1 --> write only the nesting function along the q-path in the X-Y format;
* 2 --> write out the nesting function both in the X-Y and in the XSF format.
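For reference, the total nesting function defined above can be evaluated schematically as in the following
sketch. It uses Gaussian-broadened delta functions with an arbitrary width, and assumes the eigenvalues at k
and at k+q are supplied with the same ordering of the k-grid points; it is only an illustration, not the
anaddb implementation.

```python
import numpy as np

def gaussian_delta(x, sigma):
    # Normalized Gaussian representation of the delta function
    return np.exp(-(x / sigma) ** 2) / (sigma * np.sqrt(np.pi))

def total_nesting(eig_k, eig_kq, efermi, sigma=0.01):
    # sum_{nm} sum_k delta(e_{k,n} - eF) * delta(e_{k+q,m} - eF)
    # eig_k[ik, n] and eig_kq[ik, m]: band energies at k and k+q (Hartree)
    w_k = gaussian_delta(np.asarray(eig_k) - efermi, sigma)
    w_kq = gaussian_delta(np.asarray(eig_kq) - efermi, sigma)
    return float(np.einsum("kn,km->", w_k, w_kq))
```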
""",
),
Variable(
abivarname="prtphbands@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_useful'],
dimensions="scalar",
defaultval=1,
mnemonics="PRinT PHonon BANDS",
added_in_version="before_v9",
text=r"""
Only if [[anaddb:ifcflag]]=1. This option specifies the file format for the
phonon band structure. Possible values:
* 1 Write frequencies in xmgrace format. A file with extension `PHBANDS.agr` is produced.
Use `xmgrace file_PHBANDS.agr` to visualize the data
* 2 Write frequencies in gnuplot format. The code produces a `PHBANDS.dat` file with the eigenvalues
and a `PHBANDS.gnuplot` script. Use `gnuplot file_PHBANDS.gnuplot` to visualize the phonon band structure.
""",
),
Variable(
abivarname="prtsrlr@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="PRinT the Short-Range/Long-Range decomposition of phonon FREQuencies",
added_in_version="before_v9",
text=r"""
Only if [[anaddb:ifcflag]]=1. The available options are:
* 0 --> do not write the SR/LR decomposition of phonon frequencies;
* 1 --> write out the SR/LR decomposition of the square of phonon frequencies at each q-point specified in [[anaddb:qph1l]].
For details see [[cite:Ghosez1996]].
See also [[anaddb:ifcflag]] and [[anaddb:dipdip]].
""",
),
Variable(
abivarname="prtvol@anaddb",
varset="anaddb",
vartype="integer",
topics=['Phonons_useful', 'PhononBands_useful', 'Temperature_useful', 'PhononWidth_useful', 'ElPhonTransport_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="PRinT VOLume",
added_in_version="before_v9",
text=r"""
Control the volume of printed output.
""",
),
Variable(
abivarname="q1shft@anaddb",
varset="anaddb",
vartype="real",
topics=['Phonons_useful', 'PhononBands_useful'],
dimensions=['[[anaddb:nqshft]]'],
defaultval=0,
mnemonics="Q shifts for the grid number 1",
added_in_version="before_v9",
text=r"""
This vector gives the shifts needed to define the coarse q-point grid.
a) Case [[anaddb:nqshft]]=1 In general, 0.5 0.5 0.5 with the ngqpt's even will give
very economical grids. On the other hand, it is sometimes better for phonons to
have the Gamma point in the grid. In that case, 0.0 0.0 0.0 should be OK. For
the hexagonal lattice, the above mentioned quantities become 0.0 0.0 0.5 and
0.0 0.0 0.0 .
b) Case [[anaddb:nqshft]]=2 The two q1shft vectors must form a BCC lattice. For
example, use 0.0 0.0 0.0 and 0.5 0.5 0.5
c) Case [[anaddb:nqshft]]=4 The four q1shft vectors must form a FCC lattice. For
example, use 0.0 0.0 0.0, 0.0 0.5 0.5, 0.5 0.0 0.5, 0.5 0.5 0.0 or 0.5 0.5 0.5,
0.0 0.0 0.5, 0.0 0.5 0.0, 0.5 0.0 0.0 (the latter is referred to as shifted)
Further comments: by using this technique, it is possible to increase smoothly
the number of q-points, at least less abruptly than relying on series of grids
like (for the full cubic symmetry):
1x1x1 --> (0 0 0)
2x2x2 (shifted) --> (.25 .25 .25)
2x2x2 --> 1x1x1 + (.5 0 0) (.5 .5 0) (.5 .5 .5)
4x4x4 --> 2x2x2 + (.25 0 0) (.25 .25 0) (.25 .5 0) (.25 .25 .25) (.25 .25 .5)
(.25 .5 .5)
...
with respectively 1, 1, 4 and 10 q-points, corresponding to a number of points
in the full BZ of 1, 8, 8 and 64. Indeed, the following grids are made
available:
1x1x1 with [[anaddb:nqshft]]=2 --> (0 0 0) (.5 .5 .5)
1x1x1 with [[anaddb:nqshft]]=4 --> (0 0 0) (.5 .5 0)
1x1x1 with [[anaddb:nqshft]]=4 (shifted) --> (.5 0 0) (.5 .5 .5)
2x2x2 with [[anaddb:nqshft]]=2 --> 2x2x2 + (.25 .25 .25)
2x2x2 with [[anaddb:nqshft]]=4 --> 2x2x2 + (.25 .25 0) (.25 .25 .5)
2x2x2 with [[anaddb:nqshft]]=4 (shifted) --> (.25 0 0) (.25 .25 .25) (.5 .5 .25) (.25 .5 0)
...
with respectively 2, 2, 2, 5, 6 and 4 q-points, corresponding to a number of
points in the full BZ of 2, 4, 4, 16, 32 and 32.
For a FCC lattice, it is possible to sample only the Gamma point by using a
1x1x1 BCC sampling ([[anaddb:nqshft]]=2).
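The way these shifted coarse grids are generated can be sketched as follows: each q point is obtained as
(index + shift)/ngqpt, folded into [0,1). The sketch below is purely illustrative (plain Python, not the
anaddb routine).

```python
import numpy as np

def build_qgrid(ngqpt, shifts):
    # All q points of the grid ngqpt with the given list of shifts (reduced coordinates)
    qpts = set()
    for s in shifts:
        for i in range(ngqpt[0]):
            for j in range(ngqpt[1]):
                for k in range(ngqpt[2]):
                    q = (np.array([i, j, k], dtype=float) + np.array(s)) / np.array(ngqpt)
                    qpts.add(tuple(float(x) for x in q % 1.0))
    return sorted(qpts)

# A 1x1x1 grid with the two BCC shifts reproduces the two points quoted above.
print(build_qgrid([1, 1, 1], [(0.0, 0.0, 0.0), (0.5, 0.5, 0.5)]))
```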
""",
),
Variable(
abivarname="q2shft@anaddb",
varset="anaddb",
vartype="real",
topics=['PhononBands_useful'],
dimensions=[3],
defaultval="3* 0",
mnemonics="Q points SHiFTs for the grids 2",
added_in_version="before_v9",
text=r"""
Similar to [[anaddb:q1shft]], but for the series of fine grids.
Note that [[anaddb:nqshft]] for this series of grids corresponds to 1.
""",
),
Variable(
abivarname="qgrid_type@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononWidth_expert'],
dimensions="scalar",
defaultval=0,
mnemonics="Q GRID TYPE",
added_in_version="before_v9",
text=r"""
If **qgrid_type** is set to 1, the electron-phonon part of anaddb will use the
[[anaddb:ep_nqpt]] and [[anaddb:ep_qptlist]] variables to determine which
q-points to calculate the electron-phonon coupling for. This is an alternative
to a regular grid as in the rest of anaddb (using [[anaddb:ngqpt]]).
""",
),
Variable(
abivarname="qpath@anaddb",
varset="anaddb",
vartype="real",
topics=['PhononBands_basic', 'PhononWidth_basic'],
dimensions=[3, '[[anaddb:nqpath]]'],
defaultval=0.0,
mnemonics="Q wavevectors defining a PATH",
added_in_version="before_v9",
text=r"""
It is used to generate the path along which the phonon band structure and
phonon linewidths are interpolated. There are [[anaddb:nqpath]]-1 segments to
be defined, each of which starts from the end point of the previous one. The
number of divisions in each segment is automatically calculated inside the
code to respect the proportion between the segments. The same circuit is used
for the output of the nesting function if [[anaddb:prtnest]]=1.
""",
),
Variable(
abivarname="qph1l@anaddb",
varset="anaddb",
vartype="real",
topics=['PhononBands_basic'],
dimensions=[4, '[[anaddb:nph1l]]'],
defaultval=0,
mnemonics="Q for PHonon List 1",
added_in_version="before_v9",
text=r"""
List of [[anaddb:nph1l]] wavevectors, at which the phonon frequencies will be
interpolated. Defined by 4 numbers: the wavevector is made by the three first
numbers divided by the fourth one (a normalisation factor). The coordinates
are defined with respect to the unit vectors that span the Brillouin zone.
Note that this set of axes can be non-orthogonal and non-normalized. The
normalisation factor makes it easier to input wavevectors such as
(1/3,1/3,1/3), represented by 1.0 1.0 1.0 3.0.
The internal representation of this array is as follows: for each wavevector,
the three first numbers are stored in the array qph1l(3,nph1l), while the
fourth is stored in the array qnrml1(nph1l).
""",
),
Variable(
abivarname="qph2l@anaddb",
varset="anaddb",
vartype="real",
topics=['PhononBands_basic'],
dimensions=[4, '[[anaddb:nph2l]]'],
defaultval=0,
mnemonics="PHonon List 2",
added_in_version="before_v9",
text=r"""
List of phonon wavevector _directions_ along which the non-analytical
correction to the Gamma-point phonon frequencies will be calculated (for
insulators). Four numbers, as for [[anaddb:qph1l]], but where the last one,
which corresponds to the normalisation factor, is 0.0. For the anaddb code, this
means that the three previous values define a direction. The
direction is in CARTESIAN COORDINATES, unlike the non-Gamma wavevectors
defined in the first list of vectors.
Note that if the three first numbers are zero, then the code will do a
calculation at Gamma without non-analyticities.
Also note that the code automatically sets the imaginary part of the dynamical
matrix to zero. This is useful to compute the phonon frequencies when only half of
the k-points have been used, by virtue of the time-reversal symmetry (which
may induce parasitic imaginary parts).
The internal representation of this array is as follows: for each wavevector,
the three first numbers are stored in the array qph2l(3,nph2l), while the
fourth is stored in the array qnrml2(nph2l).
""",
),
Variable(
abivarname="qrefine@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_expert'],
dimensions=[3],
defaultval=0,
mnemonics="Q-point REFINEment order (experimental)",
added_in_version="before_v9",
text=r"""
If **qrefine** is greater than 1, the code attempts to initialize a first set of
dynamical matrices from the DDB file, with a q-point grid which is
[[anaddb:ngqpt]] divided by **qrefine** (e.g. ngqpt 4 4 2 with qrefine 2 2 1 starts
with a 2x2x2 grid). The dynamical matrices are interpolated onto the full
[[anaddb:ngqpt]] grid and any additional information found in the DDB file is
imposed, before proceeding to the normal band structure and other interpolations.
This should implement the algorithm of Gaal-Nagy described in [[cite:GaalNagy2006]].
""",
),
Variable(
abivarname="ramansr@anaddb",
varset="anaddb",
vartype="integer",
topics=['nonlinear_basic'],
dimensions="scalar",
defaultval=0,
mnemonics="RAMAN Sum-Rule",
added_in_version="before_v9",
text=r"""
Governs the imposition of the sum-rule on the Raman tensors.
As in the case of the Born effective charges, the first-order derivatives of
the linear dielectric susceptibility with respect to an atomic displacement
must vanish when they are summed over all atoms. This sum rule is broken in
most calculations. By putting **ramansr** equal to 1 or 2, this sum rule is
imposed by giving each atom a part of the discrepancy.
* 0 --> no sum rule is imposed;
* 1 --> impose the sum rule on the Raman tensors, giving each atom an equal part of the discrepancy;
* 2 --> impose the sum rule on the Raman tensors, giving each atom a part of the discrepancy
proportional to the magnitude of its contribution to the Raman tensor.
For the time being, **ramansr** =1 is the preferred choice.
""",
),
Variable(
abivarname="relaxat@anaddb",
varset="anaddb",
vartype="integer",
topics=['ConstrainedPol_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="RELAXation of AToms",
added_in_version="before_v9",
text=r"""
If **relaxat** =1, relax atomic positions during a structural relaxation at
constrained polarization. See [[anaddb:polflag]].
""",
),
Variable(
abivarname="relaxstr@anaddb",
varset="anaddb",
vartype="integer",
topics=['ConstrainedPol_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="RELAXation of STRain",
added_in_version="before_v9",
text=r"""
If **relaxstr** =1, relax lattice constants (lengths/angles) during a
structural relaxation at constrained polarization. See [[anaddb:polflag]].
""",
),
Variable(
abivarname="rfmeth@anaddb",
varset="anaddb",
vartype="integer",
topics=['Phonons_expert'],
dimensions="scalar",
defaultval=1,
mnemonics="Response-Function METHod",
added_in_version="before_v9",
text=r"""
Select a particular set of Data Blocks in the DDB. (PRESENTLY, ONLY OPTION 1 IS AVAILABLE)
* 1 --> Blocks obtained by a non-stationary formulation.
* 2 --> Blocks obtained by a stationary formulation.
For more detailed explanations, see [[help:abinit]]. If the information in the
DDB is available, always use option 2; if not, you can try option 1, which is less accurate.
""",
),
Variable(
abivarname="rifcsph@anaddb",
varset="anaddb",
vartype="real",
topics=['PhononBands_useful'],
dimensions="scalar",
defaultval="zero",
mnemonics="Radius of the Interatomic Force Constant SPHere",
added_in_version="before_v9",
text=r"""
Cut-off radius of the sphere for the interatomic force constants; see also the
alternative [[anaddb:nsphere]]. If **rifcsph** = 0: maximum extent allowed by the grid.
This number defines the atoms for which the short range part of the
interatomic force constants, after imposition of the acoustic sum rule, will
not be put to zero.
""",
),
Variable(
abivarname="selectz@anaddb",
varset="anaddb",
vartype="integer",
topics=['Phonons_expert'],
dimensions="scalar",
defaultval=0,
mnemonics="SeLECT Z",
added_in_version="before_v9",
text=r"""
Select some parts of the effective charge tensor. (This is done after the
application or non-application of the ASR for effective charges). The
transformed effective charges are then used for all the subsequent
calculations.
* 0 --> The effective charge tensor is left as it is.
* 1 --> For each atom, the effective charge tensor is made isotropic, by calculating the
trace of the matrix, dividing it by 3, and using this number in a diagonal effective charge tensor.
* 2 --> For each atom, the effective charge tensor is made symmetric, by simply averaging on symmetrical elements.
Note: this option is meant for analysing the effect of anisotropy in the effective charges.
The results obtained with non-zero **selectz** are unphysical.
""",
),
Variable(
abivarname="symdynmat@anaddb",
varset="anaddb",
vartype="integer",
topics=['Phonons_useful', 'PhononBands_useful'],
dimensions="scalar",
defaultval=1,
mnemonics="SYMmetrize the DYNamical MATrix",
commentdefault="(was 0 before v5.3)",
added_in_version="before_v9",
text=r"""
If **symdynmat** is equal to 1, the dynamical matrix is symmetrized before the diagonalization.
This is especially useful when the set of primitive vectors of the unit cell
and their opposite do not reflect the symmetries of the Bravais lattice
(typical case: body-centered tetragonal lattices ; FCC and BCC lattices might
be treated with the proper setting of the [[anaddb:brav]] variable), and the
interpolation procedure based on interatomic force constants is used: slight
symmetry-breaking effects can appear. These can be removed by this additional symmetrization.
""",
),
Variable(
abivarname="symgkq@anaddb",
varset="anaddb",
vartype="integer",
topics=['ElPhonInt_expert'],
dimensions="scalar",
defaultval=1,
mnemonics="SYMmetrize the GKk matrix elements for each Q",
added_in_version="before_v9",
text=r"""
If **symgkq** is equal to 1, the electron-phonon matrix elements are
symmetrized over the small group of the q-point they correspond to. This
should always be used, except for debugging or test purposes.
""",
),
Variable(
abivarname="targetpol@anaddb",
varset="anaddb",
vartype="real",
topics=['ConstrainedPol_useful'],
dimensions=[3],
defaultval=0.0,
mnemonics="TARGET POLarization",
added_in_version="before_v9",
text=r"""
Target value of the polarization in cartesian coordinates and in C/m$^2$. See [[anaddb:polflag]].
""",
),
Variable(
abivarname="telphint@anaddb",
varset="anaddb",
vartype="integer",
topics=['ElPhonTransport_basic', 'PhononWidth_basic'],
dimensions="scalar",
defaultval=1,
mnemonics="Technique for ELectron-PHonon INTegration",
added_in_version="before_v9",
text=r"""
Flag controlling the Fermi surface integration technique used for electron-phonon quantities.
* 0 = tetrahedron method (no adjustable parameter)
* 1 = Gaussian smearing (see [[anaddb:elphsmear]])
* 2 = uniformly weighted band window between [[anaddb:ep_b_min]] and [[anaddb:ep_b_max]], for all k-points
""",
),
Variable(
abivarname="temperinc@anaddb",
varset="anaddb",
vartype="real",
topics=['Temperature_basic'],
dimensions="scalar",
defaultval=100.0,
mnemonics="TEMPERature INCrease",
added_in_version="before_v9",
text=r"""
Increment of the temperature, in Kelvin, for thermodynamical and el-phon
transport properties. See the associated [[anaddb:tempermin]] and
[[anaddb:ntemper]] variables. The default temperature grid goes from 100 K to
1000 K in steps of 100 K. At the largest temperatures, for most solids,
anharmonic effects that are not accounted for by the harmonic approximation
implemented in anaddb become important. For weakly bound systems (e.g. van der Waals
solids), such anharmonic effects may already be important at room temperature.
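A one-line sketch of the resulting temperature grid (illustrative Python; variable names follow the anaddb input variables):

import numpy as np
temperatures = tempermin + temperinc * np.arange(ntemper)  # defaults give 100 K, 200 K, ..., 1000 K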
""",
),
Variable(
abivarname="tempermin@anaddb",
varset="anaddb",
vartype="real",
topics=['Temperature_basic'],
dimensions="scalar",
defaultval=100.0,
mnemonics="TEMPERature MINimum",
added_in_version="before_v9",
text=r"""
Lowest temperature (in Kelvin) at which the thermodynamical quantities are
evaluated. It cannot be zero when [[anaddb:thmflag]] is 1.
The highest temperature is defined via [[anaddb:temperinc]] and
[[anaddb:ntemper]]. The default temperature grid goes from 100 K to 1000 K in
steps of 100 K. At the largest temperatures, for most solids, anharmonic
effects that are not accounted for by the harmonic approximation implemented in anaddb
become important. For weakly bound systems (e.g. van der Waals solids), such
anharmonic effects may already be important at room temperature.
""",
),
Variable(
abivarname="thermal_supercell@anaddb",
varset="anaddb",
vartype="integer",
topics=['Phonons_expert'],
dimensions=[3, 3],
defaultval="(/(/0,0,0/), (/0,0,0/), (/0,0,0/)/)",
mnemonics="THERMALized SUPERCELL lattice vectors",
characteristics=['[[DEVELOP]]'],
commentdefault="do not calculate any thermalized supercells",
added_in_version="before_v9",
text=r"""
**thermal_supercell** defines the real-space supercell in which a thermalized
atomic configuration is produced, following the prescription of
[[cite:Zacharias2016]]. The displacements are chosen
for each phonon mode according to a temperature, and their signs/directions are
alternated to obtain maximal compensation of the linear
electron-phonon coupling. In this way, as shown in [[cite:Zacharias2016]], dielectric properties at
finite T can be obtained from a single supercell calculation instead of extensive
molecular dynamics and configurational averaging.
The supercell vectors are not constrained to be collinear with the primitive
lattice vectors: collinear supercells correspond to a diagonal
**thermal_supercell**. The rows of the matrix give the linear combinations of
the primitive cell lattice vectors yielding the supercell vectors, as for [[kptrlatt]].
For the moment this feature is under development, and it appears that the
relative phases of the displacements are not fixed properly yet (Aug 2017).
""",
),
Variable(
abivarname="thmflag@anaddb",
varset="anaddb",
vartype="integer",
topics=['Temperature_compulsory'],
dimensions="scalar",
defaultval=0,
mnemonics="THerMal FLAG",
added_in_version="before_v9",
text=r"""
Flag controlling the calculation of thermal quantities.
* When **thmflag** == 1, the code will compute, using the histogram method:
* the normalized phonon DOS
* the phonon internal energy, free energy, entropy, constant volume heat capacity as a function of the temperature
* the Debye-Waller factors (tensors) for each atom, as a function of the temperature
* the mean-square velocity tensor for each atom, as a function of temperature
* the "average frequency" as a function of the temperature
* When **thmflag** == 2, all the phonon frequencies for the q points in the second grid are printed.
* When **thmflag** == 3, 5 or 7, the thermal corrections to the electronic eigenvalues are calculated. If **thmflag** == 3, the list of phonon wavevectors from the first list is used (with equal weight for all wavevectors in this list), while if **thmflag** == 5 or 7, the first grid of wavevectors is used, possibly folded to the irreducible Brillouin Zone if symmetry operations are present, or if they are recomputed (the latter happens for **thmflag** == 7).
* When **thmflag** == 4, 6 or 8, the temperature broadening (electron lifetime) of the electronic eigenvalues is calculated. If **thmflag** == 4, the list of phonon wavevectors from the first list is used (with equal weight for all wavevectors in this list), while if **thmflag** == 6 or 8, the first grid of wavevectors is used, possibly folded to the irreducible Brillouin Zone if symmetry operations are present or if they are recomputed (the latter happens for **thmflag** == 8).
WARNING: The use of symmetries for the temperature dependence of the
eigenenergies is tricky! It is only valid for k points that respect
the symmetries (i.e. the Gamma point), provided one also averages over the
degenerate states.
Input variables that may be needed if this flag is activated:
[[anaddb:dostol]], [[anaddb:nchan]], [[anaddb:ntemper]], [[anaddb:temperinc]],
[[anaddb:tempermin]], as well as the wavevector grid number 2 definition,
[[anaddb:ng2qpt]], [[anaddb:ngrids]], [[anaddb:q2shft]].
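As a hedged illustration of the standard harmonic-approximation formulas behind the **thmflag** == 1 quantities (this is not the anaddb implementation; a phonon DOS g(w) sampled on a frequency grid, with consistent units, is assumed):

import numpy as np

def harmonic_free_energy(omega, g, T, kB=1.0, hbar=1.0):
    # F(T) = kB*T * integral g(w) * ln[2*sinh(hbar*w / (2*kB*T))] dw
    x = hbar * omega / (2.0 * kB * T)
    return kB * T * np.trapz(g * np.log(2.0 * np.sinh(x)), omega)

def harmonic_heat_capacity(omega, g, T, kB=1.0, hbar=1.0):
    # C_v(T) = kB * integral g(w) * (x / sinh(x))**2 dw, with x = hbar*w / (2*kB*T)
    x = hbar * omega / (2.0 * kB * T)
    return kB * np.trapz(g * (x / np.sinh(x)) ** 2, omega)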
""",
),
Variable(
abivarname="thmtol@anaddb",
varset="anaddb",
vartype="real",
topics=['Temperature_useful'],
dimensions="scalar",
defaultval=0.05,
mnemonics="THerModynamic TOLerance",
added_in_version="before_v9",
text=r"""
The relative tolerance on the thermodynamical functions. This number
determines when the series of channel widths with which the DOS is calculated
can be stopped, i.e. when the mean relative change in going from one grid to
the next, larger one is smaller than **thmtol**.
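A minimal sketch of such a stopping test, assuming F_old and F_new hold the thermodynamic functions evaluated with two successive channel widths (illustrative only):

import numpy as np
converged = np.mean(np.abs((F_new - F_old) / F_old)) < thmtol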
""",
),
Variable(
abivarname="use_k_fine@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononWidth_expert'],
dimensions="scalar",
defaultval=0,
mnemonics="USE K-grid FINEr than the coarse k-grid",
added_in_version="before_v9",
text=r"""
When this flag is set, [[anaddb:kptrlatt_fine]] should also be given. In the present
version, both the eigenvalues (a denser-grid GKK file, obtained from mrggkk with only the
GS WFK file) and the electronic velocities (GKK files from a DDK calculation) are
needed. Note that the coarse k-grid must be a subset of the fine k-grid.
""",
),
Variable(
abivarname="vs_qrad_tolkms@anaddb",
varset="anaddb",
vartype="real",
topics=['PhononBands_useful'],
dimensions=[2],
defaultval="2*0.0d0",
mnemonics="Speed of Sound Q-radius, TOLerance KiloMeter/Second",
added_in_version="before_v9",
text=r"""
This variable activates the calculation of the speed of sound (it requires
[[anaddb:ifcflag]] = 1). The first entry of the array defines the radius of
the small sphere around the Gamma point (in Bohr$^{-1}$). The second entry gives the
absolute tolerance in kilometers per second. The speed of sound is evaluated by
performing a spherical average on the small sphere using Lebedev-Laikov grids
(a typical value for the q-radius is 0.1 Bohr$^{-1}$). The number of radial points is
increased until the integration converges twice within the tolerance
specified by the user (a typical value for tolkms is 0.05 km/s).
The default values will not work.
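A hedged sketch of the procedure (not the anaddb code): for each direction on a Lebedev-Laikov grid, the sound velocity of an acoustic branch is omega(q)/|q| evaluated on the small sphere, and the reported value is the weighted spherical average:

import numpy as np

def sound_speed_average(omega_of_q, directions, weights, q_radius):
    # omega_of_q(q_vec): acoustic phonon frequency at the small wavevector q_vec (assumed callable)
    # directions: (N, 3) unit vectors of the Lebedev grid; weights: (N,) quadrature weights
    v = np.array([omega_of_q(q_radius * n) / q_radius for n in directions])
    return np.sum(weights * v) / np.sum(weights)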
""",
),
# ABINIT 9
Variable(
abivarname="ddb_filepath@anaddb",
varset="anaddb",
vartype="string",
topics=['Control_useful'],
dimensions="scalar",
defaultval="",
mnemonics="DDB PATH",
added_in_version="9.0.0",
text=r"""
This variable specifies the input DDB file when anaddb is invoked with the new syntax:
anaddb t01.in > log 2> err
instead of the legacy mode based on the files file. Example:
ddb_filepath = "out_DDB"
!!! important
Shell variables (e.g. $HOME) and the tilde syntax `~` for the user home directory are not supported.
""",
),
Variable(
abivarname="output_file@anaddb",
varset="anaddb",
vartype="string",
topics=['Control_useful'],
dimensions="scalar",
defaultval="",
mnemonics="OUTPUT FILE",
added_in_version="9.0.0",
text=r"""
This variable specifies the name of the output file when anaddb is invoked with the new syntax:
anaddb t01.in > log 2> err
instead of the legacy mode based on the files file. Example:
output_file = "t01.out"
""",
),
#Variable(
# abivarname="md_output@anaddb",
# varset="anaddb",
# vartype="string",
# topics=['Control_useful'],
# dimensions="scalar",
# defaultval="",
# mnemonics="Molecular Dynamics OUTPUT",
# text=r"""
#This variable specifies the name of the MD output file when anaddb is invoked with the new syntax:
#
# anaddb t01.in > log 2> err
#
#instead of the legacy mode based on the files file.
#Note This variable is optional and used ...
#""",
#),
Variable(
abivarname="gkk_filepath@anaddb",
varset="anaddb",
vartype="string",
topics=['Control_useful'],
dimensions="scalar",
defaultval="",
mnemonics="GKK PATH",
added_in_version="9.0.0",
text=r"""
This variable specifies the name of the GKK file when anaddb is invoked with the new syntax:
anaddb t01.in > log 2> err
instead of the legacy mode based on the files file.
This variable is optional and is used for performing electron-phonon (EPH) calculations with [[elphflag@anaddb]].
""",
),
Variable(
abivarname="eph_prefix@anaddb",
varset="anaddb",
vartype="string",
topics=['Control_useful'],
dimensions="scalar",
defaultval="",
mnemonics="EPH PREFIX",
added_in_version="9.0.0",
text=r"""
This variable specifies the prefix for the elphon output files when anaddb is invoked with the new syntax:
anaddb t01.in > log 2> err
instead of the legacy mode based on the files file.
This variable is optional and is used for performing electron-phonon (EPH) calculations with [[elphflag@anaddb]].
""",
),
Variable(
abivarname="ddk_filepath@anaddb",
varset="anaddb",
vartype="string",
topics=['Control_useful'],
dimensions="scalar",
defaultval="",
mnemonics="DDK PATH",
added_in_version="9.0.0",
text=r"""
This variable specifies the name of the input file from which the list of three files containing the matrix
elements of the velocity operator is obtained.
This option is needed when anaddb is invoked with the new syntax:
anaddb t01.in > log 2> err
instead of the legacy mode based on the files file.
This variable is optional and is used for performing transport calculations with [[elphflag@anaddb]].
Example:
ddk_filepath = "t94.ddk"
where t94.ddk contains the list of file names.
t90o_DS10_GKK4
t90o_DS10_GKK5
t90o_DS10_GKK6
""",
),
Variable(
abivarname="outdata_prefix@anaddb",
varset="anaddb",
vartype="string",
topics=['Control_useful'],
dimensions="string",
defaultval="",
mnemonics="OUTput DATA PREFIX",
added_in_version="9.2.2",
text=r"""
This variable specifies the prefix name of the output files when anaddb is invoked with the new syntax:
anaddb t01.in > log 2> err
instead of the legacy mode based on the files file. Example:
outdata_prefix = "t01_o"
See also [[outdata_prefix@abinit]].
""",
),
Variable(
abivarname="dos_maxmode@anaddb",
varset="anaddb",
vartype="integer",
topics=['PhononBands_useful'],
dimensions="scalar",
defaultval=0,
mnemonics="Phonon DOS MAX MODE included",
added_in_version="9.5",
text=r"""
This variable specifies the maximum phonon mode index (up to 3*natom)
included in the computation of the phonon DOS.
The default is 0, i.e. all modes are included in the DOS.
""",
),
]
|
abinit/abinit
|
abimkdocs/variables_anaddb.py
|
Python
|
gpl-3.0
| 92,751
|
[
"ABINIT",
"BoltzTrap",
"CRYSTAL",
"DIRAC",
"Gaussian",
"Yambo"
] |
7330c6bc181b683a34725b8800630796b37b5736c05bf5c1b5325d58446312c8
|
"""
Module to set up run time parameters for Clawpack -- classic code.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
import numpy as np
#------------------------------
def setrun(claw_pkg='classic'):
#------------------------------
"""
Define the parameters used for running Clawpack.
INPUT:
claw_pkg expected to be "classic" for this setrun.
OUTPUT:
rundata - object of class ClawRunData
"""
from clawpack.clawutil import data
assert claw_pkg.lower() == 'classic', "Expected claw_pkg = 'classic'"
num_dim = 1
rundata = data.ClawRunData(claw_pkg, num_dim)
#------------------------------------------------------------------
# Problem-specific parameters to be written to setprob.data:
#------------------------------------------------------------------
probdata = rundata.new_UserData(name='probdata', fname='setprob.data')
probdata.add_param('rho', 1.0, 'density of medium')
probdata.add_param('K', 4.0, 'bulk modulus')
probdata.add_param('beta', 200., 'Gaussian width parameter')
#------------------------------------------------------------------
# Standard Clawpack parameters to be written to claw.data:
#------------------------------------------------------------------
clawdata = rundata.clawdata # initialized when rundata instantiated
# ---------------
# Spatial domain:
# ---------------
# Number of space dimensions:
clawdata.num_dim = num_dim
# Lower and upper edge of computational domain:
clawdata.lower[0] = -1.000000e+00 # xlower
clawdata.upper[0] = 1.000000e+00 # xupper
# Number of grid cells:
clawdata.num_cells[0] = 400 # mx
# ---------------
# Size of system:
# ---------------
# Number of equations in the system:
clawdata.num_eqn = 2
# Number of auxiliary variables in the aux array (initialized in setaux)
clawdata.num_aux = 0
# Index of aux array corresponding to capacity function, if there is one:
clawdata.capa_index = 0
# -------------
# Initial time:
# -------------
clawdata.t0 = 0.000000
# Restart from checkpoint file of a previous run?
# Note: If restarting, you must also change the Makefile to set:
# RESTART = True
# If restarting, t0 above should be from original run, and the
# restart_file 'fort.qNNNN' specified below should be in
# the OUTDIR indicated in Makefile.
clawdata.restart = False # True to restart from prior results
clawdata.restart_file = 'fort.q0006' # File to use for restart data
# -------------
# Output times:
#--------------
# Specify at what times the results should be written to fort.q files.
# Note that the time integration stops after the final output time.
clawdata.output_style = 1
if clawdata.output_style==1:
# Output ntimes frames at equally spaced times up to tfinal:
# Can specify num_output_times = 0 for no output
clawdata.num_output_times = 16
clawdata.tfinal = 0.800000
clawdata.output_t0 = True # output at initial (or restart) time?
elif clawdata.output_style == 2:
# Specify a list or numpy array of output times:
# Include t0 if you want output at the initial time.
clawdata.output_times = [0., 0.1]
elif clawdata.output_style == 3:
# Output every step_interval timesteps over total_steps timesteps:
clawdata.output_step_interval = 2
clawdata.total_steps = 4
clawdata.output_t0 = True # output at initial (or restart) time?
clawdata.output_format = 'ascii' # 'ascii', 'binary', 'netcdf'
clawdata.output_q_components = 'all' # could be list such as [True,True]
clawdata.output_aux_components = 'none' # could be list
clawdata.output_aux_onlyonce = True # output aux arrays only at t0
# ---------------------------------------------------
# Verbosity of messages to screen during integration:
# ---------------------------------------------------
# The current t, dt, and cfl will be printed every time step
# at AMR levels <= verbosity. Set verbosity = 0 for no printing.
# (E.g. verbosity == 2 means print only on levels 1 and 2.)
clawdata.verbosity = 0
# --------------
# Time stepping:
# --------------
# if dt_variable==True: variable time steps used based on cfl_desired,
# if dt_variable==False: fixed time steps dt = dt_initial always used.
clawdata.dt_variable = True
# Initial time step for variable dt.
# (If dt_variable==0 then dt=dt_initial for all steps)
clawdata.dt_initial = 1.000000e-01
# Max time step to be allowed if variable dt used:
clawdata.dt_max = 1.000000e+99
# Desired Courant number if variable dt used
clawdata.cfl_desired = 0.900000
# max Courant number to allow without retaking step with a smaller dt:
clawdata.cfl_max = 1.000000
# Maximum number of time steps to allow between output times:
clawdata.steps_max = 500
# ------------------
# Method to be used:
# ------------------
# Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
clawdata.order = 2
# Number of waves in the Riemann solution:
clawdata.num_waves = 2
# List of limiters to use for each wave family:
# Required: len(limiter) == num_waves
# Some options:
# 0 or 'none' ==> no limiter (Lax-Wendroff)
# 1 or 'minmod' ==> minmod
# 2 or 'superbee' ==> superbee
# 3 or 'vanleer' ==> van Leer
# 4 or 'mc' ==> MC limiter
clawdata.limiter = ['mc', 'mc']
clawdata.use_fwaves = False # True ==> use f-wave version of algorithms
# Source terms splitting:
# src_split == 0 or 'none' ==> no source term (src routine never called)
# src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
# src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.
clawdata.source_split = 0
# --------------------
# Boundary conditions:
# --------------------
# Number of ghost cells (usually 2)
clawdata.num_ghost = 2
# Choice of BCs at xlower and xupper:
# 0 or 'user' => user specified (must modify bcNamr.f to use this option)
# 1 or 'extrap' => extrapolation (non-reflecting outflow)
# 2 or 'periodic' => periodic (must specify this at both boundaries)
# 3 or 'wall' => solid wall for systems where q(2) is normal velocity
clawdata.bc_lower[0] = 'extrap' # at xlower
clawdata.bc_upper[0] = 'extrap' # at xupper
return rundata
# end of function setrun
# ----------------------
if __name__ == '__main__':
# Set up run-time parameters and write all data files.
import sys
rundata = setrun(*sys.argv[1:])
rundata.write()
|
alsam/jlclaw
|
src/acoustics_1d_example1/fortran/setrun.py
|
Python
|
mit
| 7,217
|
[
"Gaussian",
"NetCDF"
] |
d3681076e9f9d5a418e8cd44cf968bf8fd9daf32e6b95fe32667acf428cf4239
|
"""Omega matrix for functionals with Hartree-Fock exchange.
"""
from math import sqrt
import numpy as np
from numpy.linalg import inv
from ase.units import Hartree
import gpaw.mpi as mpi
from gpaw.lrtddft.omega_matrix import OmegaMatrix
from gpaw.pair_density import PairDensity
from gpaw.utilities import pack
from gpaw.utilities.lapack import diagonalize, gemm, sqrt_matrix
from gpaw.utilities.timing import Timer
class ApmB(OmegaMatrix):
"""Omega matrix for functionals with Hartree-Fock exchange.
"""
def get_full(self):
self.paw.timer.start('ApmB RPA')
self.ApB = self.Om
self.AmB = self.get_rpa()
self.paw.timer.stop()
if self.xc is not None:
self.paw.timer.start('ApmB XC')
self.get_xc() # inherited from OmegaMatrix
self.paw.timer.stop()
def get_rpa(self):
"""Calculate RPA and Hartree-fock part of the A+-B matrices."""
# shorthands
kss = self.fullkss
finegrid = self.finegrid
# calculate omega matrix
nij = len(kss)
print >> self.txt, 'RPAhyb', nij, 'transitions'
AmB = np.zeros((nij, nij))
ApB = self.ApB
# storage place for Coulomb integrals
integrals = {}
for ij in range(nij):
print >> self.txt,'RPAhyb kss['+'%d'%ij+']=', kss[ij]
timer = Timer()
timer.start('init')
timer2 = Timer()
# smooth density including compensation charges
timer2.start('with_compensation_charges 0')
rhot_p = kss[ij].with_compensation_charges(
finegrid != 0)
timer2.stop()
# integrate with 1/|r_1-r_2|
timer2.start('poisson')
phit_p = np.zeros(rhot_p.shape, rhot_p.dtype)
self.poisson.solve(phit_p,rhot_p, charge=None)
timer2.stop()
timer.stop()
t0 = timer.get_time('init')
timer.start(ij)
if finegrid == 1:
rhot = kss[ij].with_compensation_charges()
phit = self.gd.zeros()
self.restrict(phit_p, phit)
else:
phit = phit_p
rhot = rhot_p
for kq in range(ij, nij):
if kq != ij:
# smooth density including compensation charges
timer2.start('kq with_compensation_charges')
rhot = kss[kq].with_compensation_charges(
finegrid == 2)
timer2.stop()
pre = self.weight_Kijkq(ij, kq)
timer2.start('integrate')
I = self.Coulomb_integral_kss(kss[ij], kss[kq], phit, rhot)
if kss[ij].spin == kss[kq].spin:
name = self.Coulomb_integral_name(kss[ij].i, kss[ij].j,
kss[kq].i, kss[kq].j,
kss[ij].spin )
integrals[name] = I
ApB[ij,kq]= pre * I
timer2.stop()
if ij == kq:
epsij = kss[ij].get_energy() / kss[ij].get_weight()
AmB[ij,kq] += epsij
ApB[ij,kq] += epsij
timer.stop()
## timer2.write()
if ij < (nij - 1):
t = timer.get_time(ij) # time for nij-ij calculations
t = .5*t*(nij-ij) # estimated time for n*(n+1)/2, n=nij-(ij+1)
print >> self.txt,'RPAhyb estimated time left',\
self.timestring(t0*(nij-ij-1)+t)
# add HF parts and apply symmetry
timer.start('RPA hyb HF part')
if hasattr(self.xc, 'hybrid'):
weight = self.xc.hybrid
else:
weight = 0.0
for ij in range(nij):
i = kss[ij].i
j = kss[ij].j
s = kss[ij].spin
for kq in range(ij,nij):
if kss[ij].pspin == kss[kq].pspin:
k = kss[kq].i
q = kss[kq].j
ikjq = self.Coulomb_integral_ijkq(i, k, j, q, s, integrals)
iqkj = self.Coulomb_integral_ijkq(i, q, k, j, s, integrals)
ApB[ij,kq] -= weight * ( ikjq + iqkj )
AmB[ij,kq] -= weight * ( ikjq - iqkj )
ApB[kq,ij] = ApB[ij,kq]
AmB[kq,ij] = AmB[ij,kq]
timer.stop()
return AmB
def Coulomb_integral_name(self, i, j, k, l, spin):
"""return a unique name considering the Coulomb integral
symmetry"""
def ij_name(i, j):
return str(max(i, j)) + ' ' + str(min(i, j))
# maximal gives the first
if max(i, j) >= max(k, l):
base = ij_name(i, j) + ' ' + ij_name(k, l)
else:
base = ij_name(k, l) + ' ' + ij_name(i, j)
return base + ' ' + str(spin)
def Coulomb_integral_ijkq(self, i, j, k, q, spin, integrals):
name = self.Coulomb_integral_name(i, j, k, q, spin)
if name in integrals:
return integrals[name]
# create the Kohn-Sham singles
kss_ij = PairDensity(self.paw)
kss_ij.initialize(self.paw.wfs.kpt_u[spin], i, j)
kss_kq = PairDensity(self.paw)
kss_kq.initialize(self.paw.wfs.kpt_u[spin], k, q)
## kss_ij = KSSingle(i, j, spin, spin, self.paw)
## kss_kq = KSSingle(k, q, spin, spin, self.paw)
rhot_p = kss_ij.with_compensation_charges(
self.finegrid != 0)
phit_p = np.zeros(rhot_p.shape, rhot_p.dtype)
self.poisson.solve(phit_p, rhot_p, charge=None)
if self.finegrid == 1:
phit = self.gd.zeros()
self.restrict(phit_p, phit)
else:
phit = phit_p
rhot = kss_kq.with_compensation_charges(
self.finegrid == 2)
integrals[name] = self.Coulomb_integral_kss(kss_ij, kss_kq,
phit, rhot)
return integrals[name]
def Coulomb_integral_kss(self, kss_ij, kss_kq, phit, rhot):
# smooth part
I = self.gd.integrate(rhot * phit)
wfs = self.paw.wfs
Pij_ani = wfs.kpt_u[kss_ij.spin].P_ani
Pkq_ani = wfs.kpt_u[kss_kq.spin].P_ani
# Add atomic corrections
Ia = 0.0
for a, Pij_ni in Pij_ani.items():
Pi_i = Pij_ni[kss_ij.i]
Pj_i = Pij_ni[kss_ij.j]
Dij_ii = np.outer(Pi_i, Pj_i)
Dij_p = pack(Dij_ii)
Pk_i = Pkq_ani[a][kss_kq.i]
Pq_i = Pkq_ani[a][kss_kq.j]
Dkq_ii = np.outer(Pk_i, Pq_i)
Dkq_p = pack(Dkq_ii)
C_pp = wfs.setups[a].M_pp
# ----
# 2 > P P C P P
# ---- ip jr prst ks qt
# prst
Ia += 2.0*np.dot(Dkq_p, np.dot(C_pp, Dij_p))
I += self.gd.comm.sum(Ia)
return I
def timestring(self,t):
ti = int(t+.5)
td = int(ti//86400)
st=''
if td>0:
st+='%d'%td+'d'
ti-=td*86400
th = int(ti//3600)
if th>0:
st+='%d'%th+'h'
ti-=th*3600
tm = int(ti//60)
if tm>0:
st+='%d'%tm+'m'
ti-=tm*60
st+='%d'%ti+'s'
return st
def diagonalize(self, istart=None, jend=None, energy_range=None):
"""Evaluate Eigenvectors and Eigenvalues:"""
map, kss = self.get_map(istart, jend, energy_range)
nij = len(kss)
if map is None:
ApB = self.ApB.copy()
AmB = self.AmB.copy()
nij = len(kss)
else:
ApB = np.empty((nij, nij))
AmB = np.empty((nij, nij))
for ij in range(nij):
for kq in range(nij):
ApB[ij,kq] = self.ApB[map[ij],map[kq]]
AmB[ij,kq] = self.AmB[map[ij],map[kq]]
# the occupation matrix
C = np.empty((nij,))
for ij in range(nij):
C[ij] = 1. / kss[ij].fij
S = C * inv(AmB) * C
S = sqrt_matrix(inv(S).copy())
# get Omega matrix
M = np.zeros(ApB.shape)
gemm(1.0, ApB, S, 0.0, M)
self.eigenvectors = np.zeros(ApB.shape)
gemm(1.0, S, M, 0.0, self.eigenvectors)
self.eigenvalues = np.zeros((len(kss)))
self.kss = kss
diagonalize(self.eigenvectors, self.eigenvalues)
def read(self, filename=None, fh=None):
"""Read myself from a file"""
if mpi.rank == mpi.MASTER:
if fh is None:
f = open(filename, 'r')
else:
f = fh
f.readline()
nij = int(f.readline())
ApB = np.zeros((nij, nij))
for ij in range(nij):
l = f.readline().split()
for kq in range(ij, nij):
ApB[ij, kq] = float(l[kq - ij])
ApB[kq, ij] = ApB[ij, kq]
self.ApB = ApB
f.readline()
nij = int(f.readline())
AmB = np.zeros((nij, nij))
for ij in range(nij):
l = f.readline().split()
for kq in range(ij, nij):
AmB[ij, kq] = float(l[kq - ij])
AmB[kq, ij] = AmB[ij, kq]
self.AmB = AmB
if fh is None:
f.close()
def weight_Kijkq(self, ij, kq):
"""weight for the coupling matrix terms"""
return 2.
def write(self, filename=None, fh=None):
"""Write current state to a file."""
if mpi.rank == mpi.MASTER:
if fh is None:
f = open(filename, 'w')
else:
f = fh
f.write('# A+B\n')
nij = len(self.fullkss)
f.write('%d\n' % nij)
for ij in range(nij):
for kq in range(ij, nij):
f.write(' %g' % self.ApB[ij, kq])
f.write('\n')
f.write('# A-B\n')
nij = len(self.fullkss)
f.write('%d\n' % nij)
for ij in range(nij):
for kq in range(ij, nij):
f.write(' %g' % self.AmB[ij, kq])
f.write('\n')
if fh is None:
f.close()
def __str__(self):
string = '<ApmB> '
if hasattr(self,'eigenvalues'):
string += 'dimension '+ ('%d'%len(self.eigenvalues))
string += "\neigenvalues: "
for ev in self.eigenvalues:
string += ' ' + ('%f'%(sqrt(ev) * Hartree))
return string
|
qsnake/gpaw
|
gpaw/lrtddft/apmb.py
|
Python
|
gpl-3.0
| 10,953
|
[
"ASE",
"GPAW"
] |
21ed36fa41b2b274d486b26d1a67b79fc0a63ab09d6e371b2712ac27a36ff424
|
"""Pyvista specific errors."""
CAMERA_ERROR_MESSAGE = """Invalid camera description
Camera description must be one of the following:
Iterable containing position, focal_point, and view up. For example:
[(2.0, 5.0, 13.0), (0.0, 0.0, 0.0), (-0.7, -0.5, 0.3)]
Iterable containing a view vector. For example:
[-1.0, 2.0, -5.0]
A string containing the plane orthogonal to the view direction. For example:
'xy'
"""
class NotAllTrianglesError(ValueError):
"""Exception when a mesh does not contain all triangles."""
def __init__(self, message='Mesh must consist of only triangles'):
"""Empty init."""
ValueError.__init__(self, message)
class InvalidCameraError(ValueError):
"""Exception when passed an invalid camera."""
def __init__(self, message=CAMERA_ERROR_MESSAGE):
"""Empty init."""
ValueError.__init__(self, message)
class DeprecationError(RuntimeError):
"""Used for depreciated methods and functions."""
def __init__(self, message='This feature has been depreciated'):
"""Empty init."""
RuntimeError.__init__(self, message)
class VTKVersionError(RuntimeError):
"""Requested feature is not supported by the installed VTK version."""
def __init__(self, message='The requested feature is not supported by the installed VTK version.'):
"""Empty init."""
RuntimeError.__init__(self, message)
|
akaszynski/vtkInterface
|
pyvista/core/errors.py
|
Python
|
mit
| 1,403
|
[
"VTK"
] |
125716d4e7737dc3f15d51846d035774c56e209d274308634857708b09a6b6a5
|
#! /usr/bin/env python
import sympy as sy
import sympy.physics.mechanics as mech
import numpy as np
import scipy as sp
import util
from symbol import t, V, I
# TODO simplify SimplePlanarCrawler
# TODO rename SimplePlanarCrawler
# TODO move definition of head mechanical model into this submodule
"""
Generic model classes.
"""
class Model(object) :
def __init__(self, parameters=None) :
self.parameters=parameters
def subs(self, subs_list) :
# all subclasses should be able to take a list of symbolic
# substitutions and execute these for all symbolic expressions
# belonging to the class
raise NotImplementedError
class DynamicalModel(Model) :
def __init__(self, x=None, parameters=None, f=None, jacobian=None,
f_num=None, jacobian_num=None, FORTRAN_f=None,
FORTRAN_jacobian=None) :
self.x = x # state variables
self.f = f # state evolution rule
self._jacobian = jacobian # jacobian of state evolution rule
self._f_num = f_num # callable state evolution rule
self._jacobian_num = jacobian_num # callable jacobian function
self._FORTRAN_f = FORTRAN_f # FORTRAN source for state evolution rule
self._FORTRAN_jacobian = FORTRAN_jacobian # FORTRAN source for jacobian function
@property
def parameters(self) :
params = []
for param in self.f.free_symbols.difference(self.x).difference({t}) :
if type(param) != sy.stats.rv.RandomSymbol :
params.append(param)
params = np.array(params)
sort_i = np.argsort(params.astype(np.str))
params = params[sort_i].tolist()
return params
def jacobian(self) :
# TODO parallelise -- this is SLOW but must be done in SymPy;
# it should be possible to compute each entry in the Jacobian matrix
# independently
self._jacobian = sy.Matrix(self.f).jacobian(self.x)
return self._jacobian
def f_num(self) :
f_lambdified = sy.lambdify([t] + self.x, self.f)
self._f_num = lambda x, t : np.array(f_lambdified(t, *x), dtype=np.float).flatten()
return self._f_num
def jacobian_num(self, new_jac=False) :
if self._jacobian is None or new_jac is True :
self.jacobian()
jac_lambdified = sy.lambdify([t] + self.x, self._jacobian)
self._jacobian_num = lambda x, t : np.array(jac_lambdified(t, *x))
return self._jacobian_num
def FORTRAN_f(self, verbose=False) :
self._FORTRAN_f = util.FORTRAN_f(self.x, self.f, self.parameters,
verbose)
return self._FORTRAN_f
def FORTRAN_jacobian(self, new_jac=False) :
if self._jacobian is None or new_jac is True :
self.jacobian()
self._FORTRAN_jacobian = util.FORTRAN_jacobian(self.x, self._jacobian, self.parameters)
return self._FORTRAN_jacobian
"""
Mechanical modelling.
"""
def coulomb_friction_function(p, mu_f, mu_b) :
return sy.Piecewise((-mu_f, p > 0), (mu_b, p < 0), (0, True))
def derive_Hamiltons_equations(H, q, p, Q=None) :
"""
Derive equations of motion for a Hamiltonian system.
Arguments
---------
H : Hamiltonian for the system
q : vector of generalised coordinates
p : vector of generalised momenta
Q : vector of generalised forces
Returns
-------
x' : dynamical rule of evolution for the system. Note that x is the full
state vector for the system, x = [q | p].T
"""
if Q is None : Q = np.zeros(len(q))
q_dot = [sy.diff(H, p_i) for p_i in p]
p_dot = [-sy.diff(H, q[i]) + Q[i] for i in xrange(len(q))]
return sy.Matrix(q_dot + p_dot)
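# Illustrative (hypothetical) usage on a 1-D harmonic oscillator,
# H = p**2/2 + q**2/2, for which Hamilton's equations give q' = p, p' = -q:
#
#     q_ho = [mech.dynamicsymbols("q")]
#     p_ho = [mech.dynamicsymbols("p")]
#     H_ho = sy.S("1/2")*p_ho[0]**2 + sy.S("1/2")*q_ho[0]**2
#     derive_Hamiltons_equations(H_ho, q_ho, p_ho)   # -> Matrix([[p], [-q]])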
class MechanicalSystem(DynamicalModel) :
def __init__(self, q, p, H, Q=None, u=None, timescale=1.) :
"""
Construct the equations of motion for a mechanical system, given a
vector of generalised coordinates q, vector of conjugate momenta p,
Hamiltonian function H, Rayleigh dissipation function R, a vector of
generalised forces Q, and a vector of control inputs u. Often Q will be
a symbolic function of u.
"""
self.q = q
self.p = p
self.H = H
self.Q = Q
self.u = u
self.x = list(q) + list(p)
self.f = derive_Hamiltons_equations(H, q, p, Q=Q)*timescale
def H_num(self) :
H_lambdified = sy.lambdify([t] + self.x, self.H)
self._H_num = lambda x, t : np.array(H_lambdified(t, *x),
dtype=np.float).flatten()
return self._H_num
class ConservativeHead(MechanicalSystem) :
def __init__(self, lam=sy.symbols("lambda"), eps=sy.symbols("epsilon"),
**kwargs) :
# define coordinates and momenta
q = mech.dynamicsymbols("q") # axial strain
phi = mech.dynamicsymbols("phi") # bending angle
p_q = mech.dynamicsymbols("p_q") # axial momentum
p_phi = mech.dynamicsymbols("p_phi") # bending momentum
# define energetic quantities
T = sy.S("1/2")*p_q**2 + \
sy.S("1/2")*(1/((1 + eps*q)**2))*(p_phi**2) # kinetic energy
U_a = sy.S("1/2")*q**2 # axial potential
U_t = sy.S("1/2")*lam**2*phi**2 # transverse potential
U = U_a + U_t # total potential
H = T + U # Hamiltonian
super(ConservativeHead, self).__init__([q, phi], [p_q, p_phi], H, **kwargs)
class NondimensionalHarmonicCrawler(MechanicalSystem) :
def __init__(self, N, w0=sy.symbols("omega_0"), Z=sy.symbols("zeta"),
mu_f=sy.symbols("mu_f"), mu_b=sy.symbols("mu_b"),
b=sy.symbols("b"), **kwargs) :
# construct position, momentum, and control vectors
q = sy.Matrix([mech.dynamicsymbols("q"+str(i + 1)) for i in xrange(N)])
p = sy.Matrix([mech.dynamicsymbols("p"+str(i + 1)) for i in xrange(N)])
u = sy.Matrix([mech.dynamicsymbols("u"+str(i + 1)) for i in xrange(N)])
# construct some useful matrices; scale parameters
if N > 1 :
Z = sy.S("1/4")*Z
w0 = sy.S("1/2")*w0
D1 = -sy.Matrix(sp.linalg.circulant([-1, 1] + [0]*(N - 2)))
else :
D1 = sy.Matrix([1])
D2 = D1.T*D1
# construct the stiffness matrix
K = (w0**2)*D2
# form Hamiltonian function using matrix math, but then write products
# explicitly (this is useful later as it simplifies differentiation and
# some other SymPy functions)
H = sy.S("1/2")*(p.T*p + q.T*K*q)
H = H.as_immutable().as_explicit()[0]
# generalised forces due to control input
Q_u = b*D1*u
# generalised forces due to viscous friction
Q_n = -2*Z*w0*D2*p
# generalised forces due to dry friction
Q_F = sy.Matrix([coulomb_friction_function(p_i, mu_f, mu_b) for p_i in p])
# combine generalised forces
Q = Q_u + Q_n + Q_F
# call superconstructor
super(NondimensionalHarmonicCrawler, self).__init__(q, p, H, Q, u, **kwargs)
# form lists of state and control variables according to body segment
self.seg_x = [self.x[i::len(self.x)/2] for i in xrange(len(self.x)/2)]
self.seg_u = self.u
class SimplePlanarCrawler(MechanicalSystem) :
def __init__(self, N=12,
m=sy.symbols("m"), # segment mass
l=sy.symbols("l"), # equilibrium segment length
L=sy.symbols("L"), # equilibrium body length
k_axial=sy.symbols("k_axial"), # axial stiffness
k_lateral=sy.symbols("k_lateral_2:" + str(12)), # transverse stiffness
k_fluid=sy.symbols("k_fluid"), # fluid stiffness
n_axial=sy.symbols("eta_axial"), # axial viscosity
n_lateral=sy.symbols("eta_lateral_2:" + str(12)), # transverse viscosity
mu_f=sy.symbols("mu_f_1:" + str(13)), # forward dry friction coefficient
mu_b=sy.symbols("mu_b_1:" + str(13)), # backward dry friction coefficient
mu_p=sy.symbols("mu_p_1:" + str(13)), # dry friction power (focus)
b=sy.symbols("b_1:" + str(12)), # axial control gain
c=sy.symbols("c_2:" + str(12))) : # transverse control gain
"""
"""
# TODO add docstring
#################################################################
# define useful functions
#################################################################
norm = lambda x : sy.sqrt(np.dot(x, x))
#################################################################
# define kinematic quantities
#################################################################
t = sy.symbols("t")
# generalised coordinates, giving displacement of each mass relative to lab frame
qx = mech.dynamicsymbols("q_1:" + str(N + 1) + "_x")
qy = mech.dynamicsymbols("q_1:" + str(N + 1) + "_y")
q_vecs = np.array([qx, qy]).T
q = q_vecs.flatten()
# axial vectors pointing along the body axis
q_diffs = np.diff(q_vecs, axis=0)
# conjugate momenta, giving translational momentum of each mass relative to lab frame
px = mech.dynamicsymbols("p_1:" + str(N + 1) + "_x")
py = mech.dynamicsymbols("p_1:" + str(N + 1) + "_y")
p_vecs = np.array([px, py]).T
p = p_vecs.flatten()
# coordinate transformation from q's to phi's
phi_to_q = []
for i in xrange(1, N - 1) :
rd1 = q_diffs[i - 1]
rd2 = q_diffs[i]
angle = sy.atan2(rd1[0]*rd2[1] - rd2[0]*rd1[1],
rd1[0]*rd2[0] + rd1[1]*rd2[1]);
phi_to_q.append(angle)
Dphi_to_Dq = [sy.diff(phi_to_q__i, t) for phi_to_q__i in phi_to_q]
# rs in terms of qs
r_to_q = [norm(q_diff) for q_diff in q_diffs]
Dr_to_Dq = [sy.diff(r_to_q__i, t) for r_to_q__i in r_to_q]
# generalised velocities
Dqx = mech.dynamicsymbols("q_1:" + str(N + 1) + "_x", 1)
Dqy = mech.dynamicsymbols("q_1:" + str(N + 1) + "_y", 1)
Dq_vecs = np.array([Dqx, Dqy]).T
Dq = Dq_vecs.flatten()
# momenta in terms of velocities
Dq_to_p = p*m # TODO double-check this
# TODO derive this from Hamiltonian using Hamilton's
# equation
#################################################################
# define energetic quantities
#################################################################
# kinetic energy
T = (1/(2*m))*np.sum(p**2)
# axial (stretch) elastic energy
U_axial = sy.S("1/2")*k_axial*np.sum((np.array(r_to_q) - l)**2)
# lateral (bending) elastic energy
U_lateral = 0
for i in xrange(1, N - 1) :
U_lateral += k_lateral[i - 1]*sy.acos(np.dot(q_diffs[i], q_diffs[i - 1])/ \
(norm(q_diffs[i])*norm(q_diffs[i - 1])))
U_lateral = sy.S("1/2")*U_lateral
U_lateral = sy.S("1/2")*np.dot(k_lateral, (np.array(phi_to_q))**2)
# fluid elastic energy
U_fluid = sy.S("1/2")*k_fluid*(np.sum(r_to_q) - L)**2
# total potential energy
U = U_axial + U_lateral + U_fluid
# axial dissipation function (viscosity)
R_axial = sy.S("1/2")*n_axial*np.sum(np.array(Dr_to_Dq)**2)
# lateral dissipation function (viscosity)
R_lateral = sy.S("1/2")*np.dot(n_lateral, np.array(Dphi_to_Dq)**2)
# axial dissipation function (control)
#b = sy.symbols("b_1:" + str(N)) # axial gains
#u = mech.dynamicsymbols("u_1:" + str(N)) # axial control variables
#R_u = S("1/2")*np.sum([-b_i*u_i*Dq_i for b_i, u_i, Dq_i in zip(b, u, Dr_to_Dq)])
# lateral dissipation function (control)
v = mech.dynamicsymbols("v_2:" + str(N)) # lateral control variables
R_v = sy.S("1/2")*np.sum([c_i*v_i*Dphi_i for c_i, v_i, Dphi_i in zip(c, v, Dphi_to_Dq)])
# Hamiltonian, H, describing total energy and
# Rayleigh dissipation function, R, describing total power losses
H = T + U
R = R_axial + R_lateral + R_v
# store energetic quantities in object variable
self.H = H # Hamiltonian
self.T = T # kinetic energy
self.U = U # potential energy
self.U_axial = U_axial # axial potential energy
self.U_transverse = U_lateral # transverse potential energy
self.U_fluid = U_fluid # fluid potential energy
self.R = R # Rayleigh dissipation function
self.R_axial = R_axial # axial dissipation function
self.R_transverse = R_lateral # transverse dissipation function
self.R_v = R_v # transverse control dissipation function
#################################################################
# derive / construct generalised forces
#################################################################
# derive dissipative forces in terms of momentum variables
Q_R = []
for Dqi in Dq :
print "Computing dissipative forces associated with " + str(Dqi) + "..."
Q_R.append(-sy.diff(R, Dqi).subs(zip(Dq, Dq_to_p)))
# derive forces due to control input
u = mech.dynamicsymbols("u_1:" + str(N))
Q_u = np.sum(np.array([-b_i*u_i*np.array([sy.diff(r_to_q_i, q_i) for q_i in q])
for b_i, u_i, r_to_q_i in zip(b, u, r_to_q)]).T, axis=1)
# derive forces due to dry friction
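# NOTE: from here on the name R is reused as a 2-D rotation-matrix constructor;
# the Rayleigh dissipation function defined above has already been stored in self.R.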
R = lambda theta : sy.Matrix([[sy.cos(theta), -sy.sin(theta)], [sy.sin(theta), sy.cos(theta)]])
# find unit linear momentum vectors
p_vecs_unit = [p_vec/sy.sqrt(np.dot(p_vec, p_vec)) for p_vec in p_vecs]
# find unit vectors pointing along "spine"
spine_vecs_unit = [q_diff_vec/sy.sqrt(np.dot(q_diff_vec, q_diff_vec)) for q_diff_vec in q_diffs]
spine_vecs_unit += [spine_vecs_unit[-1]]
spine_vecs_unit = [sy.Matrix(spine_vec) for spine_vec in spine_vecs_unit]
# find rotation matrices to transform from spine vectors to segment orientation (n) vectors
n_R_matrices = [R(0)] + [R(phi_i) for phi_i in phi_to_q] + [R(0)]
# transform to n vectors
n_vecs = [n_R*spine_vec for n_R, spine_vec in zip(n_R_matrices, spine_vecs_unit)]
# find angle of momentum vector relative to n vector
p_angles = [sy.acos(sy.Matrix(p_unit).T*n_vec) for p_unit, n_vec in zip(p_vecs_unit, n_vecs)]
# use angle to find magnitude of friction force
# NOTE this block tends to fail with a NotImplementedError in sympy
for i in xrange(len(p_angles)) :
try :
sy.cos(p_angles[i])
except :
print "failure " + str(i)
_cos = [sy.cos(p_angles[i])[0] for i in xrange(len(p_angles))]
Q_mags = [mu_f[i] + (mu_b[i] - mu_f[i])*((1 - _cos[i])/2.)**mu_p[i] for i in xrange(len(p_angles))]
# compute friction force
Q_friction = [-Q_mag*p_unit for Q_mag, p_unit in zip(Q_mags, p_vecs_unit)]
Q_friction = np.array(Q_friction).flatten()
Q = np.array(Q_u) + np.array(Q_R) + np.array(Q_friction)
# use superconstructor to derive equations of motion
super(SimplePlanarCrawler, self).__init__(q, p, H, Q=Q, u=u + v)
class ConservativeSimplePlanarCrawler(MechanicalSystem) :
def __init__(self, N=12) :
#################################################################
# define parameters
#################################################################
m = sy.symbols("m") # mass
l = sy.symbols("l") # equilibrium segment length
L = sy.symbols("L") # equilibrium body length
k_axial = sy.symbols("k_axial") # axial stiffness
k_lateral = sy.symbols("k_lateral_2:" + str(N)) # bending stiffness
k_fluid = sy.symbols("k_fluid") # fluid stiffness
#################################################################
# define useful functions
#################################################################
norm = lambda x : sy.sqrt(np.dot(x, x))
#################################################################
# define kinematic quantities
#################################################################
# generalised coordinates, giving displacement of each mass relative to lab frame
qx = mech.dynamicsymbols("q_1:" + str(N + 1) + "_x")
qy = mech.dynamicsymbols("q_1:" + str(N + 1) + "_y")
q_vecs = np.array([qx, qy]).T
q = q_vecs.flatten()
# axial vectors pointing along the body axis
q_diffs = np.diff(q_vecs, axis=0)
# conjugate momenta, giving translational momentum of each mass relative to lab frame
px = mech.dynamicsymbols("p_1:" + str(N + 1) + "_x")
py = mech.dynamicsymbols("p_1:" + str(N + 1) + "_y")
p_vecs = np.array([px, py]).T
p = p_vecs.flatten()
# coordinate transformation from q's to phi's
phi_to_q = []
for i in xrange(1, N - 1) :
rd1 = q_diffs[i - 1]
rd2 = q_diffs[i]
angle = sy.atan2(rd1[0]*rd2[1] - rd2[0]*rd1[1],
rd1[0]*rd2[0] + rd1[1]*rd2[1]);
phi_to_q.append(angle)
# rs in terms of qs
r_to_q = [norm(q_diff) for q_diff in q_diffs]
#################################################################
# define energetic quantities
#################################################################
# kinetic energy
T = (1/(2*m))*np.sum(p**2)
# axial (stretch) elastic energy
U_axial = sy.S("1/2")*k_axial*np.sum((np.array(r_to_q) - l)**2)
# lateral (bending) elastic energy
U_lateral = 0
for i in xrange(1, N - 1) :
U_lateral += k_lateral[i - 1]*sy.acos(np.dot(q_diffs[i], q_diffs[i - 1])/ \
(norm(q_diffs[i])*norm(q_diffs[i - 1])))
U_lateral = sy.S("1/2")*U_lateral
U_lateral = sy.S("1/2")*np.dot(k_lateral, (np.array(phi_to_q))**2)
# fluid elastic energy
U_fluid = sy.S("1/2")*k_fluid*(np.sum(r_to_q) - L)**2
# total potential energy
U = U_axial + U_lateral + U_fluid
# Hamiltonian (total energy)
H = T + U
# use superconstructor to derive equations of motion
super(ConservativeSimplePlanarCrawler, self).__init__(q, p, H)
"""
Neural modelling.
"""
class DynamicalNeuron(DynamicalModel) :
def __init__(self, *args, **kwargs) :
super(DynamicalNeuron, self).__init__(*args, **kwargs)
class DynamicalBinaryNeuron(DynamicalNeuron) :
def __init__(self, w=[sy.symbols("w")], u=[I], theta=sy.symbols("theta"),
x=V, k=sy.symbols("k")) :
self.x = [x] # state vector
self.w = w # weight vector
self.u = u # input vector
self.theta = theta # threshold
self.k = k # rate constant for state transition
@property
def f(self) :
x = self.x[0]
w = self.w
u = self.u
k = self.k
return sy.Matrix([sy.Piecewise((k - k*x, np.dot(w, u) > self.theta),
(-k*x, True))])
"""
Set up the neuromuscular system, consisting of three cell types : sensory
neuron (SN), inhibitory interneuron (IN), and motor neuron (MN). This model
includes no muscle fibres; the MNs directly produce forces! There is one cell
of each type within each segment. They are connected to each other and to the
mechanical system as follows :
mechanics -> SN
SN -> MN
-> IN
IN -> MN (neighbouring segment some distance away)
-> IN (neighbouring segment some distance away)
MN -> mechanics
Note that the INs form a "mutual inhibition" network.
"""
class MechanicalFeedbackAndMutualInhibition(DynamicalModel) :
def __init__(self, N_seg,
# TODO provide symbolic SN_u, SN_ws!
SN_u, # vector of sensory neuron inputs
SN_ws, # matrix of sensory neuron input weights
k=1, # binary neuron switching rate
SN_thresh=sy.symbols("theta_SN"),
IN_SN_w=1, # sensory neuron -> inhibitory interneuron weight
#IN_IN_w=-2, # inh interneuron -> inhibitory interneuron weight
IN_IN_w=sy.symbols("IN_IN_w"),
IN_thresh=0.5, # IN threshold for activation
MN_SN_w=1, # sensory neuron -> motor neuron weight
#MN_IN_w=-2, # inhibitory interneuron -> motor neuron weight
MN_IN_w=sy.symbols("MN_IN_w"),
MN_thresh=0.5) : # MN threshold before activation
# state variables for each neuron population
V_SNs = [sy.symbols("V_SN_" + str(i + 1)) for i in xrange(N_seg)]
V_INs = [sy.symbols("V_IN_" + str(i + 1)) for i in xrange(N_seg)]
V_MNs = [sy.symbols("V_MN_" + str(i + 1)) for i in xrange(N_seg)]
# construct sensory neuron population
print "Constructing sensory neuron population..."
SNs = [DynamicalBinaryNeuron(w, SN_u, SN_thresh, r, k) for w, r in zip(SN_ws, V_SNs)]
# set inhibitory interneuron inputs :
# SN -> IN within the same segment
# IN -> IN across non-adjacent segments
print "Setting inhibitory interneuron input weights..."
IN_u = V_SNs + V_INs
IN_SN_ws = (IN_SN_w*np.eye(N_seg)).tolist()
IN_IN_adj = sp.linalg.circulant([0, 0] + [1]*(N_seg - 3) + [0])
IN_IN_ws = (IN_IN_w*IN_IN_adj).tolist()
IN_ws = [SN_w + IN_w for SN_w, IN_w in zip(IN_SN_ws, IN_IN_ws)]
# construct inhibitory interneuron population
print "Constructing inhibitory interneuron population..."
INs = [DynamicalBinaryNeuron(w, IN_u, IN_thresh, r, k) for w, r in zip(IN_ws, V_INs)]
# set motor neuron inputs :
# SN -> MN within the same segment
# IN -> MN across non-adjacent segments
print "Setting motor neuron input weights..."
MN_u = V_SNs + V_INs
MN_SN_ws = (MN_SN_w*np.eye(N_seg)).tolist()
MN_IN_adj = IN_IN_adj
MN_IN_ws = (MN_IN_w*MN_IN_adj).tolist()
MN_ws = [SN_w + IN_w for SN_w, IN_w in zip(MN_SN_ws, MN_IN_ws)]
print "Constructing motor neuron population..."
MNs = [DynamicalBinaryNeuron(w, MN_u, MN_thresh, r, k) for w, r in zip(MN_ws, V_MNs)]
# combine neural populations and prepare neural states and dynamical equations
neurons = SNs + INs + MNs
f = sy.Matrix([c.f for c in neurons])
x = sy.Matrix([c.x for c in neurons])
super(MechanicalFeedbackAndMutualInhibition, self).__init__(x=x, f=f)
|
janeloveless/mechanics-of-exploration
|
neuromech/model.py
|
Python
|
unlicense
| 24,479
|
[
"NEURON"
] |
e05fa4101d9ed8870433d6920df55bf1794ba1dcff2f5de35b7139b12178988e
|
from __future__ import division
import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.naive_bayes import MultinomialNB, ComplementNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_gnb_neg_priors():
"""Test whether an error is raised in case of negative priors"""
clf = GaussianNB(priors=np.array([-1., 2.]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_priors():
"""Test whether the class prior override is properly used"""
clf = GaussianNB(priors=np.array([0.3, 0.7])).fit(X, y)
assert_array_almost_equal(clf.predict_proba([[-0.1, -0.1]]),
np.array([[0.825303662161683,
0.174696337838317]]), 8)
assert_array_almost_equal(clf.class_prior_, np.array([0.3, 0.7]))
def test_gnb_priors_sum_isclose():
# test whether the class prior sum is properly tested
X = np.array([[-1, -1], [-2, -1], [-3, -2], [-4, -5], [-5, -4],
[1, 1], [2, 1], [3, 2], [4, 4], [5, 5]])
priors = np.array([0.08, 0.14, 0.03, 0.16, 0.11, 0.16, 0.07, 0.14,
0.11, 0.0])
Y = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
clf = GaussianNB(priors)
# smoke test for issue #9633
clf.fit(X, Y)
def test_gnb_wrong_nb_priors():
""" Test whether an error is raised if the number of prior is different
from the number of class"""
clf = GaussianNB(priors=np.array([.25, .25, .25, .25]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_prior_greater_one():
"""Test if an error is raised if the sum of prior greater than one"""
clf = GaussianNB(priors=np.array([2., 1.]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_prior_large_bias():
"""Test if good prediction when class prior favor largely one class"""
clf = GaussianNB(priors=np.array([0.01, 0.99]))
clf.fit(X, y)
assert_equal(clf.predict([[-0.1, -0.1]]), np.array([2]))
def test_check_update_with_no_data():
""" Test when the partial fit is called without any data"""
# Create an empty array
prev_points = 100
mean = 0.
var = 1.
x_empty = np.empty((0, X.shape[1]))
tmean, tvar = GaussianNB._update_mean_variance(prev_points, mean,
var, x_empty)
assert_equal(tmean, mean)
assert_equal(tvar, var)
def test_gnb_pfit_wrong_nb_features():
"""Test whether an error is raised when the number of feature changes
between two partial fit"""
clf = GaussianNB()
# Fit for the first time the GNB
clf.fit(X, y)
# Partial fit a second time with an incoherent X
assert_raises(ValueError, clf.partial_fit, np.hstack((X, X)), y)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1:]), 2)
assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_almost_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_almost_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float64)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non-regression test to make sure that any further refactoring / optim
# of the NB models does not harm the performance on a slightly
# non-linearly separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(var_smoothing=0.1), X, y, cv=10)
assert_greater(scores.mean(), 0.89)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_almost_equal(clf.feature_log_prob_, (num - denom))
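# --- illustrative sketch (added for clarity; not part of the original test
# suite).  It recomputes, from raw counts, the Laplace-smoothed Bernoulli
# estimate checked above: P(x_i=1 | y=c) = (N_ci + alpha) / (N_c + 2*alpha).
# All names below are local to this sketch.
def _bernoulli_feature_log_prob_sketch(X, y, alpha=1.0):
    import numpy as np
    X, y = np.asarray(X), np.asarray(y)
    classes = np.unique(y)
    log_prob = np.empty((classes.size, X.shape[1]))
    for k, c in enumerate(classes):
        Xc = X[y == c]
        # per-feature presence counts over the documents of class c
        log_prob[k] = (np.log(Xc.sum(axis=0) + alpha) -
                       np.log(Xc.shape[0] + 2.0 * alpha))
    return log_prob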
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
def test_cnb():
# Tests ComplementNB when alpha=1.0 for the toy example in Manning,
# Raghavan, and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo.
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1).
Y = np.array([0, 0, 0, 1])
# Check that weights are correct. See steps 4-6 in Table 4 of
# Rennie et al. (2003).
theta = np.array([
[
(0 + 1) / (3 + 6),
(1 + 1) / (3 + 6),
(1 + 1) / (3 + 6),
(0 + 1) / (3 + 6),
(0 + 1) / (3 + 6),
(1 + 1) / (3 + 6)
],
[
(1 + 1) / (6 + 6),
(3 + 1) / (6 + 6),
(0 + 1) / (6 + 6),
(1 + 1) / (6 + 6),
(1 + 1) / (6 + 6),
(0 + 1) / (6 + 6)
]])
weights = np.zeros(theta.shape)
normed_weights = np.zeros(theta.shape)
for i in range(2):
weights[i] = -np.log(theta[i])
normed_weights[i] = weights[i] / weights[i].sum()
# Verify inputs are nonnegative.
clf = ComplementNB(alpha=1.0)
assert_raises(ValueError, clf.fit, -X, Y)
clf.fit(X, Y)
# Check that counts/weights are correct.
feature_count = np.array([[1, 3, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1]])
assert_array_equal(clf.feature_count_, feature_count)
class_count = np.array([3, 1])
assert_array_equal(clf.class_count_, class_count)
feature_all = np.array([1, 4, 1, 1, 1, 1])
assert_array_equal(clf.feature_all_, feature_all)
assert_array_almost_equal(clf.feature_log_prob_, weights)
clf = ComplementNB(alpha=1.0, norm=True)
clf.fit(X, Y)
assert_array_almost_equal(clf.feature_log_prob_, normed_weights)
def test_naive_bayes_scale_invariance():
# Scaling the data should not change the prediction results
iris = load_iris()
X, y = iris.data, iris.target
labels = [GaussianNB().fit(f * X, y).predict(f * X)
for f in [1E-10, 1, 1E10]]
assert_array_equal(labels[0], labels[1])
assert_array_equal(labels[1], labels[2])
def test_alpha():
# Setting alpha=0 should not produce NaN results when p(x_i|y_j)=0 occurs
X = np.array([[1, 0], [1, 1]])
y = np.array([0, 1])
nb = BernoulliNB(alpha=0.)
assert_warns(UserWarning, nb.partial_fit, X, y, classes=[0, 1])
assert_warns(UserWarning, nb.fit, X, y)
prob = np.array([[1, 0], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
nb = MultinomialNB(alpha=0.)
assert_warns(UserWarning, nb.partial_fit, X, y, classes=[0, 1])
assert_warns(UserWarning, nb.fit, X, y)
prob = np.array([[2./3, 1./3], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
# Test sparse X
X = scipy.sparse.csr_matrix(X)
nb = BernoulliNB(alpha=0.)
assert_warns(UserWarning, nb.fit, X, y)
prob = np.array([[1, 0], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
nb = MultinomialNB(alpha=0.)
assert_warns(UserWarning, nb.fit, X, y)
prob = np.array([[2./3, 1./3], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
# Test for alpha < 0
X = np.array([[1, 0], [1, 1]])
y = np.array([0, 1])
expected_msg = ('Smoothing parameter alpha = -1.0e-01. '
'alpha should be > 0.')
b_nb = BernoulliNB(alpha=-0.1)
m_nb = MultinomialNB(alpha=-0.1)
assert_raise_message(ValueError, expected_msg, b_nb.fit, X, y)
assert_raise_message(ValueError, expected_msg, m_nb.fit, X, y)
b_nb = BernoulliNB(alpha=-0.1)
m_nb = MultinomialNB(alpha=-0.1)
assert_raise_message(ValueError, expected_msg, b_nb.partial_fit,
X, y, classes=[0, 1])
assert_raise_message(ValueError, expected_msg, m_nb.partial_fit,
X, y, classes=[0, 1])
|
herilalaina/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
Python
|
bsd-3-clause
| 24,653
|
[
"Gaussian"
] |
2e3d940430381df6b5541f4ef9d7a53d890d8dbf0ab34e4b120a963e0d2b93c7
|
import urllib2
from urllib2 import urlparse
from restful_lib import Connection
#from gae_restful_lib import GAE_Connection
from datetime import datetime
from StringIO import StringIO
from xml.etree import ElementTree as ET
SPARQL_ENDPOINT = "/services/sparql"
META_ENDPOINT = "/meta"
CONTENT_ENDPOINT = "/items"
JOB_REQUESTS = "/jobs"
SNAPSHOTS = "/snapshots"
SNAPSHOT_TEMPLATE = "/snapshots/%s"
RESET_STORE_TEMPLATE = u"""<rdf:RDF
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"
xmlns:bf="http://schemas.talis.com/2006/bigfoot/configuration#" >
<bf:JobRequest>
<rdfs:label>%s</rdfs:label>
<bf:jobType rdf:resource="http://schemas.talis.com/2006/bigfoot/configuration#ResetDataJob"/>
<bf:startTime>%sZ</bf:startTime>
</bf:JobRequest>
</rdf:RDF>"""
SNAPSHOT_STORE_TEMPLATE = """<rdf:RDF
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"
xmlns:bf="http://schemas.talis.com/2006/bigfoot/configuration#" >
<bf:JobRequest>
<rdfs:label>%s</rdfs:label>
<bf:jobType rdf:resource="http://schemas.talis.com/2006/bigfoot/configuration#SnapshotJob"/>
<bf:startTime>%sZ</bf:startTime>
</bf:JobRequest>
</rdf:RDF>"""
SNAPSHOT_RESTORE_TEMPLATE = """<rdf:RDF
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"
xmlns:bf="http://schemas.talis.com/2006/bigfoot/configuration#" >
<bf:JobRequest>
<rdfs:label>%s</rdfs:label>
<bf:jobType rdf:resource="http://schemas.talis.com/2006/bigfoot/configuration#RestoreJob"/>
<bf:snapshotUri rdf:resource="%s" />
<bf:startTime>%sZ</bf:startTime>
</bf:JobRequest>
</rdf:RDF>"""
class RDFFormatException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Store():
def __init__(self, base_store_url, username=None, password=None):
""" Base URL for the store should be pretty self-explanatory. E.g. something like
"http://api.talis.com/stores/store_name"
Only needs to enter the username/password if this class is going to tinker
with things."""
if base_store_url.endswith('/'):
base_store_url = base_store_url[:-1]
self.base_store_url = base_store_url
# Split the given URL
if base_store_url:
self.conn = Connection(base_store_url, username=username, password=password)
def does_snapshot_exist(self, snapshot_filename):
# Test to see if snapshot exists:
snapshot_path = SNAPSHOT_TEMPLATE % snapshot_filename
response = self.conn.request(snapshot_path, method = "HEAD")
if response.get('headers') and response.get('headers').get('status'):
status = response.get('headers').get('status')
if status in ['200', '204']:
return True
elif status.startswith('4'):
return False
# else: raise Error?
return False
def schedule_reset_data(self, label, at_time=None):
"""Will request that the store is emptied, and label the request.
If a time is given as an ISO8601 formatted string, this will be
the scheduled time for the snapshot. Otherwise, it will use the current time."""
if not at_time:
at_time=datetime.utcnow().isoformat().split('.')[0]
snapshot_request = RESET_STORE_TEMPLATE % (label, at_time)
return self.conn.request_post(JOB_REQUESTS, body = snapshot_request, headers={'Content-Type':'application/rdf+xml'})
def schedule_snapshot_data(self, label, at_time=None):
"""Will request a snapshot be made of the store.
If a time is given as an ISO8601 formatted string, this will be
the scheduled time for the snapshot. Otherwise, it will use the current time."""
if not at_time:
at_time=datetime.utcnow().isoformat().split('.')[0]
snapshot_request = SNAPSHOT_STORE_TEMPLATE % (label, at_time)
return self.conn.request_post(JOB_REQUESTS, body = snapshot_request, headers={'Content-Type':'application/rdf+xml'})
def schedule_snapshot_restore(self, label, snapshot_filename, at_time=None):
"""Will request that the store is restored from a snapshot. If a time is given as
an ISO8601 formatted string, this will be the scheduled time for
the recovery. Otherwise, it will use the current time."""
if not at_time:
at_time=datetime.utcnow().isoformat().split('.')[0]
# Test to see if snapshot exists:
snapshot_path = SNAPSHOT_TEMPLATE % snapshot_filename
if self.does_snapshot_exist(snapshot_filename):
snapshot_uri = "%s%s" % (self.base_store_url, snapshot_path)
snapshot_request = SNAPSHOT_RESTORE_TEMPLATE % (label, snapshot_uri, at_time)
return self.conn.request_post(JOB_REQUESTS, body = snapshot_request, headers={'Content-Type':'application/rdf+xml'})
def submit_rdfxml(self, rdf_text):
"""Puts the given RDF/XML into the Talis Store"""
return self._put_rdf(rdf_text, mimetype="application/rdf+xml")
def _put_rdf(self, rdf_text, mimetype="application/rdf+xml"):
"""Placeholder for allowing other serialisation types to be put into a
Talis store, whether the conversion takes place here, or if the Talis
store starts to accept other formats."""
if rdf_text:
request_headers = {}
if mimetype not in ['application/rdf+xml']:
raise RDFFormatException("%s is not an allowed RDF serialisation format" % mimetype)
request_headers['Content-Type'] = mimetype
return self.conn.request_post(META_ENDPOINT, body=rdf_text, headers=request_headers)
def _query_sparql_service(self, query, args={}):
"""Low-level SPARQL query - returns the message and response headers from the server.
You may be looking for Store.sparql instead of this."""
passed_args = {'query':query}
passed_args.update(args)
return self.conn.request_get(SPARQL_ENDPOINT, args=passed_args, headers={'Content-type':'application/x-www-form-urlencoded'})
def _query_search_service(self, query, args={}):
"""Low-level content box query - returns the message and response headers from the server.
You may be looking for Store.search instead of this."""
passed_args = {'query':query}
passed_args.update(args)
return self.conn.request_get(CONTENT_ENDPOINT, args=passed_args, headers={'Content-type':'application/x-www-form-urlencoded'} )
def _list_snapshots(self, passed_args={}):
return self.conn.request_get(SNAPSHOTS, args=passed_args, headers={})
##############################################################################
# Convenience Functions
##############################################################################
def submit_rdfxml_from_url(self, url_to_file, headers={"Accept":"application/rdf+xml"}):
"""Convenience method - downloads the file from a given url, and then pushes that
into the meta store. Currently, it doesn't put the content through a
parse -> reserialise step (which would let it handle more than rdf/xml on the
way in), but that is a future possibility."""
import_rdf_connection = Connection(url_to_file)
response = import_rdf_connection.request_get("", headers=headers)
if response.get('headers') and response.get('headers').get('status') in ['200', '204']:
request_headers = {}
# Lowercase all response header fields, to make matching easier.
# According to HTTP spec, they should be case-insensitive
response_headers = response['headers']
for header in response_headers:
response_headers[header.lower()] = response_headers[header]
# Set the body content
body = response.get('body').encode('UTF-8')
# Get the response mimetype
rdf_type = response_headers.get('content-type', None)
return self._put_rdf(body, mimetype=rdf_type)
def sparql(self, query, args={}):
"""Performs a SPARQL query and simply returns the body of the response if successful
- if there is an issue, such as a code 404 or 500, this method will return False.
Use the _query_sparql_service method to get hold of
the complete response in this case."""
response = self._query_sparql_service(query, args)
headers = response.get('headers')
status = headers.get('status', headers.get('Status'))
if status in ['200', 200, '204', 204]:
return response.get('body').encode('UTF-8')
else:
return False
def search(self, query, args={}):
"""Performs a search query and simply returns the body of the response if successful
- if there is an issue, such as a code 404 or 500, this method will return False.
Use the _query_search_service method to get hold of
the complete response in this case."""
response = self._query_search_service(query, args)
headers = response.get('headers')
status = headers.get('status', headers.get('Status'))
if status in ['200', 200, '204', 204]:
parsed_atom = Atom_Search_Results(response.get('body').encode('UTF-8'))
return parsed_atom.get_item_list()
else:
return False
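# --- illustrative usage sketch (added for clarity; not part of the original
# module).  Assumes a reachable Talis store at the hypothetical URL below and
# valid credentials; error handling is deliberately omitted.
#
#   store = Store("http://api.talis.com/stores/example-store",
#                 username="user", password="secret")
#   body = store.sparql("SELECT * WHERE { ?s ?p ?o } LIMIT 10")
#   if body is not False:
#       print body                       # raw SPARQL response body
#   store.schedule_snapshot_data("nightly snapshot")   # queue a snapshot job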
class Item():
def __init__(self):
self.title = None
self.link = None
class Atom_Search_Results():
def __init__(self, atom_text):
self.load_atom_search(atom_text)
def load_atom_search(self, atom_text):
self.atom = ET.fromstring(atom_text)
def get_item_list(self):
if self.atom:
items = []
for item in self.atom.findall('{http://purl.org/rss/1.0/}item'):
item_fields = Item()
item_fields.title = item.find('{http://purl.org/rss/1.0/}title').text
item_fields.link = item.find('{http://purl.org/rss/1.0/}link').text
items.append(item_fields)
return items
class GAE_Store(Store):
def __init__(self, base_store_url, username=None, password=None):
""" Base URL for the store should be pretty self-explanatory. E.g. something like
"http://api.talis.com/stores/store_name"
The username and password will not do anything until the Google App Engine
fetch library handles authentication, if it ever does."""
if base_store_url.endswith('/'):
base_store_url = base_store_url[:-1]
self.base_store_url = base_store_url
# Split the given URL
if base_store_url:
self.conn = GAE_Connection(base_store_url, username, password)
|
darvin/qtdjango
|
src/qtdjango/restclient/talis.py
|
Python
|
gpl-2.0
| 11,420
|
[
"TINKER"
] |
b627476bfc3b5cd7f6f89e34fc4a820ec4649fe0d0001b6217fb1c47ab7129be
|
#
# This file contains the new classes programmed in python added to the
# main objects (Beam, OE and Source) defined in C (in ShadowLib)
#
# It also define GeometricSource and Beamline
#
import Shadow.ShadowLib as ShadowLib
import numpy
import inspect
import copy
class Beam(ShadowLib.Beam):
def __init__(self, N=None):
ShadowLib.Beam.__init__(self)
if N is not None:
self.SetRayZeros(N)
def duplicate(self):
beam_copy = Beam()
beam_copy.rays = copy.deepcopy(self.rays)
return beam_copy
def retrace(self,dist):
try:
tof = (-self.rays[:,1].flatten() + dist)/self.rays[:,4].flatten()
self.rays[:,0] += tof*self.rays[:,3].flatten()
self.rays[:,1] += tof*self.rays[:,4].flatten()
self.rays[:,2] += tof*self.rays[:,5].flatten()
except AttributeError:
print ('retrace: No rays')
def traceCompoundOE(self,compoundOE,from_oe=1,write_start_files=0,write_end_files=0,\
write_star_files=0, write_mirr_files=0):
"""
traces a compound optical element
IMPORTANT: Note that shadow3 changes the values of the OE when tracing (i.e., oe1 changes after
beam.traceOE(oe1) ). This does not happen with compoundOE: Each oe inside compoundOE is
copied before tracing.
Note also that when using the write_*_files keywords, the files are written by python, not
by SHADOW (so FWRITE is not changed), with the exception of write_mirr_files. In that case
the code sets FWRITE=1 in the oe (mirror files only). This affects the returned
list of oe's after tracing.
:param compoundOE: input object
:param from_oe: index of the first oe (for tracing compoundOE after an existing system) (default=1)
:param write_start_files: 0=No (default), 1=Yes (all), 2: only first and last ones
:param write_end_files: 0=No (default), 1=Yes (all), 2: only first and last ones
:param write_star_files: 0=No (default), 1=Yes (all), 2: only first and last ones
:param write_mirr_files: 0=No (default), 1=Yes (all), 2: only first and last ones
:return: a new compoundOE with the list of the OE objects after tracing (the info of end.xx files)
"""
# oe_index = from_oe
# oe_n = compoundOE.number_oe()
# list = CompoundOE()
#
# for i,oe in enumerate(compoundOE.list):
# print("\nTracing compound oe %d from %d. Absolute oe number is: %d"%(i+1,oe_n,oe_index))
#
# print(">>>>>>>>> FILE_SOURCE before 0",len(oe.FILE_SOURCE))
# oe1 = oe.duplicate()
# print(">>>>>>>>> FILE_SOURCE before 1",len(oe1.FILE_SOURCE))
# iwrite = 0
# if write_mirr_files == 1: iwrite = 1
# if write_mirr_files == 2 and i == 0: iwrite = 1
# if write_mirr_files == 2 and i == oe_n-1: iwrite = 1
# if iwrite:
# oe1.FWRITE = 1
# iwrite = 0
# if write_start_files == 1: iwrite = 1
# if write_start_files == 2 and i == 0: iwrite = 1
# if write_start_files == 2 and i == oe_n-1: iwrite = 1
#
# if iwrite:
# #TODO: check possible bug: the length of FILE_SOURCE is changed when writing start file
# print(">>>>>>>> TRACE BEFORE WRITE <<<<<< type(oe1.FILE_SOURCE)",i,(oe1.FILE_SOURCE),len(oe1.FILE_SOURCE))
# #tmp = oe1.duplicate()
# oe1.write("start.%02d"%(oe_index))
# print(">>>>>>>> TRACE AFTER WRITE <<<<<< type(oe1.FILE_SOURCE)",i,(oe1.FILE_SOURCE),len(oe1.FILE_SOURCE))
#
# print("File written to disk: start.%02d"%(oe_index))
#
# print(">>>>>>>>> FILE_SOURCE before",len(oe1.FILE_SOURCE),len(oe1.FILE_REFL))
# #tmp = oe1.duplicate()
# self.traceOE(oe1,oe_index)
# print(">>>>>>>>> FILE_SOURCE after",len(oe1.FILE_SOURCE),len(oe1.FILE_REFL))
#
# list.append(oe1)
#
# iwrite = 0
# if write_star_files == 1: iwrite = 1
# if write_star_files == 2 and i == 0: iwrite = 1
# if write_star_files == 2 and i == oe_n-1: iwrite = 1
# if iwrite == 1:
# self.write("star.%02d"%(oe_index))
# print("File written to disk: star.%02d"%(oe_index))
#
# iwrite = 0
# if write_end_files == 1: iwrite = 1
# if write_end_files == 2 and i == 0: iwrite = 1
# if write_end_files == 2 and i == oe_n-1: iwrite = 1
# if write_end_files == 1:
# oe1.write("end.%02d"%(oe_index))
# print("File written to disk: end.%02d"%(oe_index))
#
# oe_index += 1
#
# return list
oe_n = len(compoundOE.list)
for i in range(oe_n):
print("\nTracing compound oe %d from %d. Absolute oe number is: %d"%(i+1,oe_n,from_oe+i))
#if wanted to write mirr.xx, tell SHADOW to do it
if write_mirr_files == 1:
compoundOE.list[i].FWRITE = 1
if write_mirr_files == 2:
if i == 0 or i == oe_n-1:
compoundOE.list[i].FWRITE = 1
#dump start.xx files, if selected
if write_start_files == 1:
compoundOE.list[i].write("start.%02d"%(from_oe+i))
if write_start_files == 2:
if i == 0 or i == oe_n-1:
compoundOE.list[i].write("start.%02d"%(from_oe+i))
self.traceOE(compoundOE.list[i], from_oe+i)
#dump star.xx files, if selected
if write_star_files == 1:
self.write("star.%02d"%(from_oe+i))
if write_star_files == 2:
if i == 0 or i == oe_n-1:
self.write("star.%02d"%(from_oe+i))
#dump end.xx files, if selected
if write_end_files == 1:
compoundOE.list[i].write("end.%02d"%(from_oe+i))
if write_end_files == 2:
if i == 0 or i == oe_n-1:
compoundOE.list[i].write("end.%02d"%(from_oe+i))
return
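# --- illustrative usage sketch (added for clarity; not part of the original
# module).  `beam` and `compound` are hypothetical names for an already
# generated Beam and a CompoundOE holding the optical elements.
#
#   beam.traceCompoundOE(compound, from_oe=1,
#                        write_star_files=2)   # star.xx for first/last oe only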
def get_standard_deviation(self,col, nolost=1, ref=0):
'''
returns the standard deviation of one variable in the beam
:param col: variable (shadow column number)
:param nolost: 0 = use all rays, 1=good only, 2= lost only
:param ref: 0 = no weight, 1=weight with intensity (col23)
:return:
'''
x = self.getshonecol(col=col,nolost=nolost)
if ref == 0:
return x.std()
else:
w = self.getshonecol(23,nolost=nolost)
average = numpy.average(x, weights=w)
variance = numpy.average( (x-average)**2, weights=w)
return(numpy.sqrt(variance))
#added srio 2015
def getshonecol(self,col, nolost=0):
'''
Extract a column from a shadow file (eg. begin.dat) or a Shadow.Beam instance.
The columns are numbered in the fortran convention, i.e. starting from 1.
It returns a numpy.array filled with the values of the chosen column.
Inputs:
beam : str instance with the name of the shadow file to be loaded. OR
Shadow.Beam initialized instance.
col : int for the chosen columns.
Outputs:
numpy.array 1-D with length equal to the number of (selected) rays.
Error:
if an error occurs an ArgsError is raised.
Possible choice for col are:
1 X spatial coordinate [user's unit]
2 Y spatial coordinate [user's unit]
3 Z spatial coordinate [user's unit]
4 Xp direction or divergence [rads]
5 Yp direction or divergence [rads]
6 Zp direction or divergence [rads]
7 X component of the electromagnetic vector (s-polariz)
8 Y component of the electromagnetic vector (s-polariz)
9 Z component of the electromagnetic vector (s-polariz)
10 Lost ray flag
11 Energy [eV]
12 Ray index
13 Optical path length
14 Phase (s-polarization)
15 Phase (p-polarization)
16 X component of the electromagnetic vector (p-polariz)
17 Y component of the electromagnetic vector (p-polariz)
18 Z component of the electromagnetic vector (p-polariz)
19 Wavelength [A]
20 R= SQRT(X^2+Y^2+Z^2)
21 angle from Y axis
22 the magnitude of the electromagnetic vector
23 |E|^2 (total intensity)
24 total intensity for s-polarization
25 total intensity for p-polarization
26 K = 2 pi / lambda [A^-1]
27 K = 2 pi / lambda * col4 [A^-1]
28 K = 2 pi / lambda * col5 [A^-1]
29 K = 2 pi / lambda * col6 [A^-1]
30 S0-stokes = |Es|^2 + |Ep|^2
31 S1-stokes = |Es|^2 - |Ep|^2
32 S2-stokes = 2 |Es| |Ep| cos(phase_s-phase_p)
33 S3-stokes = 2 |Es| |Ep| sin(phase_s-phase_p)
'''
#A2EV = 50676.89919462
codata_h = numpy.array(6.62606957e-34)
codata_ec = numpy.array(1.602176565e-19)
codata_c = numpy.array(299792458.0)
A2EV = 2.0*numpy.pi/(codata_h*codata_c/codata_ec*1e2)
col=col-1
ray = self.rays
column = None
if col>=0 and col<18 and col!=10: column = ray[:,col]
if col==10: column = ray[:,col]/A2EV
if col==18: column = 2*numpy.pi*1.0e8/ray[:,10]
if col==19: column = numpy.sqrt(ray[:,0]*ray[:,0]+ray[:,1]*ray[:,1]+ray[:,2]*ray[:,2])
if col==20: column = numpy.arccos(ray[:,4])
if col==21: column = numpy.sqrt(numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8,15,16,17] ]),axis=0))
if col==22: column = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8,15,16,17] ]),axis=0)
if col==23: column = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8] ]),axis=0)
if col==24: column = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [15,16,17] ]),axis=0)
if col==25: column = ray[:,10]*1.0e8
if col==26: column = ray[:,3]*ray[:,10]*1.0e8
if col==27: column = ray[:,4]*ray[:,10]*1.0e8
if col==28: column = ray[:,5]*ray[:,10]*1.0e8
if col==29:
E2s = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8] ]),axis=0)
E2p = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [15,16,17] ]),axis=0)
column = E2p+E2s
if col==30:
E2s = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8] ]),axis=0)
E2p = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [15,16,17] ]),axis=0)
column = E2p-E2s
if col==31:
E2s = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8] ]),axis=0)
E2p = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [15,16,17] ]),axis=0)
Cos = numpy.cos(ray[:,13]-ray[:,14])
column = 2*E2s*E2p*Cos
if col==32:
E2s = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8] ]),axis=0)
E2p = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [15,16,17] ]),axis=0)
Sin = numpy.sin(ray[:,13]-ray[:,14])
column = 2*E2s*E2p*Sin
if nolost == 0:
return column.copy()
if nolost == 1:
f = numpy.where(ray[:,9] > 0.0)
if len(f[0])==0:
print ('getshonecol: no GOOD rays, returning empty array')
return numpy.empty(0)
return column[f].copy()
if nolost == 2:
f = numpy.where(ray[:,9] < 0.0)
if len(f[0])==0:
print ('getshonecol: no BAD rays, returning empty array')
return numpy.empty(0)
return column[f].copy()
return None
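# --- illustrative usage sketch (added for clarity; not part of the original
# module).  `beam` is a hypothetical, already-traced Beam instance.
#
#   x_good = beam.getshonecol(1, nolost=1)    # X coordinate of good rays
#   w_good = beam.getshonecol(23, nolost=1)   # |E|^2 (intensity) of good rays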
def getshcol(self,col,nolost=0):
'''
Extract multiple columns from a shadow file (eg.'begin.dat') or a Shadow.Beam instance.
The columns are numbered in the fortran convention, i.e. starting from 1.
It returns a numpy.array filled with the values of the chosen columns.
Inputs:
beam : str instance with the name of the shadow file to be loaded. OR
Shadow.Beam initialized instance.
col : tuple or list instance of int with the number of columns chosen.
Outputs:
numpy.array 2-D with dimension R x the number of rays, where R is the total number of columns chosen
Error:
if an error occurs an ArgsError is raised.
Possible choice for col are:
1 X spatial coordinate [user's unit]
2 Y spatial coordinate [user's unit]
3 Z spatial coordinate [user's unit]
4 X' direction or divergence [rads]
5 Y' direction or divergence [rads]
6 Z' direction or divergence [rads]
7 X component of the electromagnetic vector (s-polariz)
8 Y component of the electromagnetic vector (s-polariz)
9 Z component of the electromagnetic vector (s-polariz)
10 Lost ray flag
11 Energy [eV]
12 Ray index
13 Optical path length
14 Phase (s-polarization)
15 Phase (p-polarization)
16 X component of the electromagnetic vector (p-polariz)
17 Y component of the electromagnetic vector (p-polariz)
18 Z component of the electromagnetic vector (p-polariz)
19 Wavelength [A]
20 R= SQRT(X^2+Y^2+Z^2)
21 angle from Y axis
22 the magnitude of the electromagnetic vector
23 |E|^2 (total intensity)
24 total intensity for s-polarization
25 total intensity for p-polarization
26 K = 2 pi / lambda [A^-1]
27 K = 2 pi / lambda * col4 [A^-1]
28 K = 2 pi / lambda * col5 [A^-1]
29 K = 2 pi / lambda * col6 [A^-1]
30 S0-stokes = |Es|^2 + |Ep|^2
31 S1-stokes = |Es|^2 - |Ep|^2
32 S2-stokes = 2 |Es| |Ep| cos(phase_s-phase_p)
33 S3-stokes = 2 |Es| |Ep| sin(phase_s-phase_p)
'''
ret = []
if isinstance(col, int): return self.getshonecol(col,nolost=nolost)
for c in col:
ret.append(self.getshonecol(c,nolost=nolost))
return tuple(ret)
def intensity(self,nolost=0):
w = self.getshonecol(23,nolost=nolost)
return w.sum()
def nrays(self,nolost=0):
try:
w = self.getshonecol(10)
except Exception:
print("Error: Empty beam...")
return 0
if nolost == 0:
return w.size
if nolost == 1:
return numpy.array(numpy.where(w >= 0)).size
if nolost == 2:
return numpy.array(numpy.where(w < 0)).size
def histo1(self,col,xrange=None,nbins=50,nolost=0,ref=0,write=None,factor=1.0):
'''
Calculate the histogram of a column, simply counting the rays, or weighting with the intensity.
It returns a dictionary which contains the histogram data.
Inputs:
beam : str instance with the name of the shadow file to be loaded, or a Shadow.Beam initialized instance.
col : int for the chosen column.
Optional keywords:
xrange : tuple or list of length 2 describing the interval of interest for x, the data read from the chosen column.
(default: None, thus using min and max of the array)
nbins : number of bins of the histogram.
nolost :
0 All rays
1 Only good rays
2 Only lost rays
ref :
0 (or None) only count the rays
1 weight with intensity (look at col=23 |E|^2 total intensity)
other value: use that column as weight
write :
None (default) don't write any file
file_name write the histogram into the file 'file_name'.
factor : a scalar factor to multiply the selected column before histogramming
(e.g., for changing scale from cm to um then factor=1e4).
Outputs:
a python dictionary with the calculated histogram. The following keys are set:
histogram, histogram_sigma, bin_center, bin_left, bin_right, intensity,
xrange, nbins, ref, nolost, fwhm (among others)
Error:
if an error occurs an ArgsError is raised.
Possible choice for col are:
1 X spatial coordinate [user's unit]
2 Y spatial coordinate [user's unit]
3 Z spatial coordinate [user's unit]
4 X' direction or divergence [rads]
5 Y' direction or divergence [rads]
6 Z' direction or divergence [rads]
7 X component of the electromagnetic vector (s-polariz)
8 Y component of the electromagnetic vector (s-polariz)
9 Z component of the electromagnetic vector (s-polariz)
10 Lost ray flag
11 Energy [eV]
12 Ray index
13 Optical path length
14 Phase (s-polarization)
15 Phase (p-polarization)
16 X component of the electromagnetic vector (p-polariz)
17 Y component of the electromagnetic vector (p-polariz)
18 Z component of the electromagnetic vector (p-polariz)
19 Wavelength [A]
20 R= SQRT(X^2+Y^2+Z^2)
21 angle from Y axis
22 the magnitude of the electromagnetic vector
23 |E|^2 (total intensity)
24 total intensity for s-polarization
25 total intensity for p-polarization
26 K = 2 pi / lambda [A^-1]
27 K = 2 pi / lambda * col4 [A^-1]
28 K = 2 pi / lambda * col5 [A^-1]
29 K = 2 pi / lambda * col6 [A^-1]
30 S0-stokes = |Es|^2 + |Ep|^2
31 S1-stokes = |Es|^2 - |Ep|^2
32 S2-stokes = 2 |Es| |Ep| cos(phase_s-phase_p)
33 S3-stokes = 2 |Es| |Ep| sin(phase_s-phase_p)
'''
#initialize return value
ticket = {'error':1}
coli = col - 1
if ref == 1: ref = 23
if ref == None: ref = 0
# copy the inputs
ticket['col'] = col
ticket['write'] = write
ticket['nolost'] = nolost
ticket['nbins'] = nbins
ticket['xrange'] = xrange
ticket['factor'] = factor
ticket['ref'] = ref
if ref==0:
x = self.getshonecol(col, nolost=nolost)
w = numpy.ones(len(x))
else:
x, w = self.getshcol((col,ref),nolost=nolost)
if factor != 1.0: x *= factor
# if nolost==0:
# t = numpy.where(a!=-3299)
#
# if nolost==1:
# t = numpy.where(a > 0.0)
#
# if nolost==2:
# t = numpy.where(a < 0.0)
#
# if nolost > 2:
# print ('invalid value for nolost flag: %d'%(nolost))
# #raise KeyError ('invalid value for nolost flag: %d'%(nolost))
# return ticket
#
#
# t = numpy.array(t)
# t.shape = -1
#
# if t.size == 0:
# print ('no rays match the selection, the histogram will not be calculated')
# return ticket
if xrange == None:
xrange = [x.min(), x.max() ]
h,bins = numpy.histogram(x,bins=nbins,range=xrange,weights=w)
#evaluate the histogram with squares of the weight for error calculations
h2,bins2 = numpy.histogram(x,bins=nbins,range=xrange,weights=(w*w))
#Evaluation of histogram error.
# See Pag 17 in Salvat, Fernandez-Varea and Sempau
# Penelope, A Code System for Monte Carlo Simulation of
# Electron and Photon Transport, AEN NEA (2003)
#
# See James, Rep. Prog. Phys., Vol 43 (1980) pp 1145-1189 (special attention to pag. 1184)
h_sigma = numpy.sqrt( h2 - h*h/float(len(w)) )
if write != None and write != "":
f = open(write,'w')
f.write('#F %s \n'%(write))
f.write('#C This file has been created using Shadow.Beam.histo1() \n')
f.write('#C COLUMN 1 CORRESPONDS TO ABSCISSAS IN THE CENTER OF EACH BIN\n')
f.write('#C COLUMN 2 CORRESPONDS TO ABSCISSAS IN THE LEFT CORNER OF THE BIN\n')
f.write('#C COLUMN 3 CORRESPONDS TO INTENSITY\n')
f.write('#C COLUMN 4 CORRESPONDS TO ERROR: SIGMA_INTENSITY\n')
f.write('#C col = %d\n'%(col))
f.write('#C nolost = %d\n'%(nolost))
f.write('#C nbins = %d\n'%(nbins))
f.write('#C ref = %d\n'%(ref),)
f.write(' \n')
f.write('#S 1 histogram\n')
f.write('#N 4\n')
f.write('#L X1 X2 Y YERR\n')
for i in range(len(h)):
f.write('%f\t%f\t%f\t%f\n' % ( (bins[i]+bins[i+1])*0.5, bins[i], h[i], h_sigma[i] ))
f.close()
print('histo1: file written to disk: %s'%(write))
#
# output
ticket['error'] = 0
ticket['histogram'] = h
ticket['bins'] = bins
ticket['histogram_sigma'] = h_sigma
bin_center = bins[:-1]+(bins[1]-bins[0])*0.5
ticket['bin_center'] = bin_center
ticket['bin_left'] = bins[:-1]
ticket['bin_right'] = bins[:-1]+(bins[1]-bins[0])
ticket['xrange'] = xrange
ticket['intensity'] = w.sum()
ticket['fwhm'] = None
ticket['nrays'] = self.nrays(nolost=0)
ticket['good_rays'] = self.nrays(nolost=1)
#for practical purposes, writes the points that will define the histogram area
tmp_b = []
tmp_h = []
for s,t,v in zip(ticket["bin_left"],ticket["bin_right"],ticket["histogram"]):
tmp_b.append(s)
tmp_h.append(v)
tmp_b.append(t)
tmp_h.append(v)
ticket['histogram_path'] = numpy.array(tmp_h)
ticket['bin_path'] = numpy.array(tmp_b)
#CALCULATE fwhm
tt = numpy.where(h>=max(h)*0.5)
if h[tt].size > 1:
binSize = bins[1]-bins[0]
ticket['fwhm'] = binSize*(tt[0][-1]-tt[0][0])
ticket['fwhm_coordinates'] = (bin_center[tt[0][0]],bin_center[tt[0][-1]])
return ticket
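# --- illustrative usage sketch (added for clarity; not part of the original
# module).  `beam` is a hypothetical, already-traced Beam instance.
#
#   tkt = beam.histo1(1, nbins=100, nolost=1, ref=23)   # intensity-weighted X histogram
#   if tkt['error'] == 0:
#       print("intensity: %g, fwhm: %s" % (tkt['intensity'], str(tkt['fwhm'])))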
def get_good_range(self,icol, nolost=0):
"""
:param icol: the column number (SHADOW convention, starting from 1)
:param nolost: lost rays flag (0=all, 1=good, 2=losses)
:return: [rmin,rmax] the selected range
"""
col = self.getshonecol(icol,nolost=nolost)
if col.size == 0:
return [-1,1]
rmin = min(col)
rmax = max(col)
if rmin>0.0:
rmin = rmin*0.95
else:
rmin = rmin*1.05
if rmax<0.0:
rmax = rmax*0.95
else:
rmax = rmax*1.05
if rmin==rmax:
rmin = rmin*0.95
rmax = rmax*1.05
if rmin==0.0:
rmin = -1.0
rmax = 1.0
return [rmin,rmax]
def histo2(self,col_h,col_v,nbins=25,ref=23, nbins_h=None, nbins_v=None, nolost=0,xrange=None,yrange=None):
"""
Performs a 2D histogram to prepare data for a plotxy plot.
It uses numpy.histogram2d for the calculations.
Note that this Shadow.Beam.histo2 was previously called Shadow.Beam.plotxy
:param col_h: the horizontal column
:param col_v: the vertical column
:param nbins: number of bins
:param ref: 0 = weight with number of rays, 1 or 23 = weight with intensity, any other column number = weight with that column
:param nbins_h: number of bins in H
:param nbins_v: number of bins in V
:param nolost: 0 or None: all rays, 1=good rays, 2=only losses
:param xrange: range for H
:param yrange: range for V
:return: a dictionary with all data needed for plot
"""
ticket = {'error':1}
if ref == 1: ref = 23
if ref == None: ref = 0
if nbins_h == None: nbins_h = nbins
if nbins_v == None: nbins_v = nbins
# copy the inputs
ticket['col_h'] = col_h
ticket['col_v'] = col_v
ticket['nolost'] = nolost
ticket['nbins_h'] = nbins_h
ticket['nbins_v'] = nbins_v
ticket['ref'] = ref
(col1,col2) = self.getshcol((col_h,col_v),nolost=nolost)
if xrange==None: xrange = self.get_good_range(col_h,nolost=nolost)
if yrange==None: yrange = self.get_good_range(col_v,nolost=nolost)
if ref == 0:
weights = col1*0+1
else:
weights = self.getshonecol(ref,nolost=nolost)
(hh,xx,yy) = numpy.histogram2d(col1, col2, bins=[nbins_h,nbins_v], range=[xrange,yrange], normed=False, weights=weights)
ticket['xrange'] = xrange
ticket['yrange'] = yrange
ticket['bin_h_edges'] = xx
ticket['bin_v_edges'] = yy
ticket['bin_h_left'] = numpy.delete(xx,-1)
ticket['bin_v_left'] = numpy.delete(yy,-1)
ticket['bin_h_right'] = numpy.delete(xx,0)
ticket['bin_v_right'] = numpy.delete(yy,0)
ticket['bin_h_center'] = 0.5*(ticket['bin_h_left']+ticket['bin_h_right'])
ticket['bin_v_center'] = 0.5*(ticket['bin_v_left']+ticket['bin_v_right'])
ticket['histogram'] = hh
ticket['histogram_h'] = hh.sum(axis=1)
ticket['histogram_v'] = hh.sum(axis=0)
ticket['intensity'] = self.intensity(nolost=nolost)
ticket['nrays'] = self.nrays(nolost=0)
ticket['good_rays'] = self.nrays(nolost=1)
#CALCULATE fwhm
h = ticket['histogram_h']
tt = numpy.where(h>=max(h)*0.5)
if h[tt].size > 1:
binSize = ticket['bin_h_center'][1]-ticket['bin_h_center'][0]
ticket['fwhm_h'] = binSize*(tt[0][-1]-tt[0][0])
ticket['fwhm_coordinates_h'] = (ticket['bin_h_center'][tt[0][0]],ticket['bin_h_center'][tt[0][-1]])
else:
ticket["fwhm_h"] = None
h = ticket['histogram_v']
tt = numpy.where(h>=max(h)*0.5)
if h[tt].size > 1:
binSize = ticket['bin_v_center'][1]-ticket['bin_v_center'][0]
ticket['fwhm_v'] = binSize*(tt[0][-1]-tt[0][0])
ticket['fwhm_coordinates_v'] = (ticket['bin_v_center'][tt[0][0]],ticket['bin_v_center'][tt[0][-1]])
else:
ticket["fwhm_v"] = None
# print(">>>>Sum of H: ",ticket["histogram_h"].sum())
# print(">>>>Sum of V: ",ticket["histogram_v"].sum())
# print(">>>>Sum of I: ",ticket["histogram"].sum())
# print(">>>>Sum of W: ",weights.sum())
return ticket
def plotxy(self,*args, **kwargs):
print("Deprecated use of Shadow.plotxy(): Use Shadow.histo2()")
ticket = self.histo2(*args,**kwargs)
return(ticket)
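# --- illustrative usage sketch (added for clarity; not part of the original
# module).  `beam` is a hypothetical, already-traced Beam instance.
#
#   tkt2 = beam.histo2(1, 3, nbins=101, nolost=1, ref=23)   # X vs Z, intensity-weighted
#   print(tkt2['fwhm_h'], tkt2['fwhm_v'])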
class OE(ShadowLib.OE):
def __init__(self):
ShadowLib.OE.__init__(self)
# here methods to initialize the OE
# renamed setScreens -> set_screens srio@esrf.eu
def set_screens(self,
n_screen=1,
i_screen=numpy.zeros(10),
i_abs=numpy.zeros(10),
sl_dis=numpy.zeros(10),
i_slit=numpy.zeros(10),
i_stop=numpy.zeros(10),
k_slit=numpy.zeros(10),
thick=numpy.zeros(10),
file_abs=numpy.array(['', '', '', '', '', '', '', '', '', '']),
rx_slit=numpy.zeros(10),
rz_slit=numpy.zeros(10),
cx_slit=numpy.zeros(10),
cz_slit=numpy.zeros(10),
file_src_ext=numpy.array(['', '', '', '', '', '', '', '', '', ''])
):
self.F_SCREEN = 1
if n_screen<=10 and n_screen>0:
self.N_SCREEN = n_screen
else:
print ('Shadow cannot handle more than 10 screens')
return
self.I_SCREEN = i_screen
self.I_ABS = i_abs
self.SL_DIS = sl_dis
self.I_SLIT = i_slit
self.I_STOP = i_stop
self.K_SLIT = k_slit
self.THICK = thick
self.FILE_ABS = file_abs
self.RX_SLIT = rx_slit
self.RZ_SLIT = rz_slit
self.CX_SLIT = cx_slit
self.CZ_SLIT = cz_slit
self.FILE_SRC_EXT = file_src_ext
return self
def set_empty(self,T_INCIDENCE=0,T_REFLECTION=180.0,T_SOURCE=0.0,T_IMAGE=0.0,ALPHA=0.0):
"""
Defines an empty optical element (useful as an arm, e.g. to rotate reference frames)
By default, there is no change in the optical axis direction.
:param T_INCIDENCE: incidence angle (default=0)
:param T_REFLECTION: reflection angle (default=180)
:param T_SOURCE: distance from previous o.e. (default=0)
:param T_IMAGE: distance to next o.e. (default=0)
:param ALPHA: mirror orientation angle (default=0)
:return:
"""
self.F_REFRAC = 2
self.T_INCIDENCE = T_INCIDENCE
self.T_REFLECTION = T_REFLECTION
self.T_SOURCE = T_SOURCE
self.T_IMAGE = T_IMAGE
self.ALPHA = ALPHA
return self
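# --- illustrative usage sketch (added for clarity; not part of the original
# module).  An empty oe can act as a simple drift/arm; the numbers below are
# arbitrary examples.
#
#   oe_arm = OE().set_empty(T_SOURCE=100.0, T_IMAGE=0.0)   # 100-unit drift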
# #
# # TODO: REMOVE START HERE
# #
# def setOutput(self,fwrite=0):
# self.FWRITE = fwrite
# return self
#
# def setFrameOfReference(self,source_distance=10.0,image_distance=20.0,source_angle=10.0,image_angle=10.0,alpha=0.0):
# self.T_SOURCE = source_distance
# self.T_IMAGE = image_distance
# self.T_INCIDENCE = source_angle
# self.T_REFLECTION = image_angle
# self.ALPHA = alpha
# return self
#
# def setSeed(self,istar=12345701):
# self.ISTAR1 = istar
# return self
#
# def setConvex(self):
# self.F_CONVEX = 1
# return self
#
# def setConcave(self):
# self.F_CONVEX = 0
# return self
#
# def setCylindric(self,cyl_ang=0.0):
# self.FCYL = 1
# self.CIL_ANG = cyl_ang
# return self
#
# def unsetCylinder(self):
# self.FCYL = 0
# return self
#
# def setAutoFocus(self,f_default=0,ssour=0.0,simag=0.0,theta=0.0):
# self.F_EXT = 0
# self.F_DEFAULT = f_default
# if f_default==0:
# self.SSOUR = ssour
# self.SIMAG = simag
# self.THETA = theta
# return self
#
# def unsetReflectivity(self):
# self.F_REFLEC = 0
# return self
#
# def setReflectivityFull(self,f_refl=0,file_refl='GAAS.SHA',rparams=numpy.zeros(2,dtype=numpy.float64),f_thick=0):
# self.F_REFLEC = 1
# self.F_REFL = f_refl
# self.FILE_REFL = file_refl
# self.ALFA = rparams[0]
# self.GAMMA = rparams[1]
# self.F_THICK = f_thick
# return self
#
# def setReflectivityScalar(self,f_refl=0,file_refl='GAAS.SHA',rparams=numpy.zeros(2,dtype=numpy.float64),f_thick=0):
# self.F_REFLEC = 2
# self.F_REFL = f_refl
# self.FILE_REFL = file_refl
# self.F_THICK = f_thick
# return self
#
# def setMultilayer(self,f_reflec=1,file_refl='GAAS.SHA',f_thick=0):
# self.F_REFLEC = f_reflec
# self.F_REFL = 2
# self.FILE_REFL = file_refl
# self.F_THICK = f_thick
# return self
#
# def setSpheric(self,rmirr=20.0):
# self.FMIRR = 1
# self.F_EXT = 1
# self.RMIRR = rmirr
# return self
#
# def setSphericAuto(self,fparams=None):#):f_default=0,ssour=0.0,simag=0.0,theta=0.0):
# self.FMIRR = 1
# if fparams == None:
# self.setAutoFocus(1)
# else:
# self.setAutoFocus(0,ssour=fparams[0],simag=fparams[1],theta=fparams[2])
# return self
#
# def setEllipsoid(self,ell_the=0.0,axmaj=0.0,axmin=0.0):
# self.FMIRR = 2
# self.F_EXT = 1
# self.ELL_THE = ell_the
# self.AXMAJ = axmaj
# self.AXMIN = axmin
# return self
#
# def setEllipsoidAuto(self,fparams=None):#ell_the=0.0,axmaj=0.0,axmin=0.0,f_default=0,ssour=0.0,simag=0.0,theta=0.0):
# self.FMIRR = 2
# if fparams==None:
# self.setAutoFocus(1)
# else:
# self.setAutoFocus(0,ssour=fparams[0],simag=fparams[1],theta=fparams[2])
# return self
#
# def setToroid(self,f_torus=0,r_maj=0.0,r_min=0.0):
# self.FMIRR = 3
# self.F_EXT = 1
# self.F_TORUS = f_torus
# self.R_MAJ = r_maj
# self.R_MIN = r_min
# return self
#
# def setToroidAuto(self,f_torus=0,r_maj=0.0,r_min=0.0,f_default=0,ssour=0.0,simag=0.0,theta=0.0):
# self.FMIRR = 3
# self.F_TORUS = f_torus
# self.R_MAJ = r_maj
# self.R_MIN = r_min
# self.setAutoFocus(f_default,ssour,simag,theta)
# return self
#
# def setParaboloid(self,f_side=1,param=0.0):
# self.FMIRR = 4
# self.F_EXT = 1
# self.F_SIDE = f_side
# self.PARAM = param
# return self
#
# def setParaboloidAuto(self,f_side=1,fparams=None):#f_side=1,param=0.0,f_default=0,ssour=0.0,simag=0.0,theta=0.0,f_side=0):
# self.FMIRR = 4
# self.F_SIDE = f_side
# if fparams==None:
# self.setAutoFocus(1)
# else:
# self.setAutoFocus(0,ssour=fparam[0],simag=fparam[1],theta=fparams[2])
# return self
#
# def setPlane(self):
# self.FMIRR = 5
# self.F_EXT = 1
# return self
#
# def setCodlingSlit(self,cod_len=0.0,cod_wid=0.0): # HERE ASK MANOLO, always 1 or 0
# self.FMIRR = 6
# self.F_EXT = 1
# self.COD_LEN = 0.0
# self.COD_WID = 0.0
# return self
#
# def setHyperboloid(self,ell_the=0.0,axmaj=0.0,axmin=0.0):
# self.FMIRR = 7
# self.F_EXT = 1
# self.ELL_THE = ell_the
# self.AXMAJ = axmaj
# self.AXMIN = axmin
# return self
#
# def setHyperboloidAuto(self,fparams=None):#ell_the=0.0,axmaj=0.0,axmin=0.0,f_default=0,ssour=0.0,simag=0.0,theta=0.0):
# self.FMIRR = 2
# if fparams==None:
# self.setAutoFocus(1)
# else:
# self.setAutoFocus(0,ssour=fparams[0],simag=fparams[1],theta=fparams[2])
# return self
#
# def setCone(self,cone_a=0.0):
# self.FMIRR = 8
# self.F_EXT = 1
# self.CONE_A = cone_a
# return self
#
# def setPoly(self,file_mir=''):
# self.FMIRR = 9
# self.F_EXT = 1
# self.FILE_MIR = file_mir
# return self
#
# def setCCC(self,ccc=numpy.array([1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0],dtype=numpy.float64)):
# self.FMIRR = 10
# self.F_EXT = 1
# self.CCC[:] = ccc[:]
# return self
#
# def setRipple(self,f_g_s=0,xyAmpWavPha=numpy.array([0.0,0.0,0.0,0.0,0.0,0.0],dtype=numpy.float64),file_rip=''):
# self.F_RIPPLE = 1
# self.F_G_S = f_g_s
# self.X_RIP_AMP = xyAmpWavPha[0]
# self.X_RIP_WAV = xyAmpWavPha[1]
# self.X_PHASE = xyAmpWavPha[2]
# self.Y_RIP_AMP = xyAmpWavPha[3]
# self.Y_RIP_WAV = xyAmpWavPha[4]
# self.Y_PHASE = xyAmpWavPha[5]
# self.FILE_RIP = file_rip
# return self
#
# def setDimensions(self,fshape=1,params=numpy.zeros(4,dtype=numpy.float64)):
# self.FHIT_C = 1
# self.FSHAPE = fshape
# self.RLEN1 = params[0]
# self.RLEN2 = params[1]
# self.RWIDX1 = params[2]
# self.RWIDX2 = params[3]
# #TODO set self.FHIT_C = 0 elsewhere
# return self
#
# def setReflector(self):
# self.F_REFRACT = 0
# return self
#
# def setRefractor(self,r_ind_obj = 1.0,r_ind_ima = 1.0,r_attenuation_obj = 0.0,r_attenuation_ima = 0.0):
# self.F_REFRACT = 1
# self.R_IND_OBJ = r_ind_obj
# self.R_IND_IMA = r_ind_ima
# self.R_ATTENUATION_OBJ = r_attenuation_obj
# self.R_ATTENUATION_IMA = r_attenuation_ima
# return self
#
# def unsetCrystal(self):
# self.F_CRYSTAL = 0
# return self
#
# def setCrystal(self,file_refl=b'',a_bragg=0.0):
# self.F_CRYSTAL = 1
# self.FILE_REFL = file_refl
# self.F_REFLECT = 0
#
# if a_bragg!=0.0:
# self.F_BRAGG_A = 1
# self.A_BRAGG = a_bragg
# return self
#
# def setJohansson(self,r_johansson=None):
# self.F_JOHANSSON = 1
# #TODO set self.F_JOHANSSON = 0 elsewhere
# if r_johansson!=None:
# self.F_EXT = 1
# self.R_JOHANSSON=r_johansson
# else:
# self.F_EXT = 0
# return self
#
# def setGratingRulingConstant(self,f_ruling=1,ruling=0.0):
# self.F_GRATING = 1
# self.F_RULING = f_ruling
# self.RULING = ruling
# return self
#
# def setGratingHolographic(self,holo_params=numpy.zeros(7,dtype=numpy.float64),f_pw=2,f_pw_c=0,f_virtual=0):
# self.F_GRATING = 1
# self.F_RULING = 2
# self.HOLO_R1 = holo_params[0]
# self.HOLO_R2 = holo_params[1]
# self.HOLO_DEL = holo_params[2]
# self.HOLO_GAM = holo_params[3]
# self.HOLO_W = holo_params[4]
# self.HOLO_RT1 = holo_params[5]
# self.HOLO_RT2 = holo_params[6]
# self.F_PW = f_pw
# self.F_PW_C = f_pw_c
# self.F_VIRTUAL = f_virtual
# return self
#
# def setGratingFan(self,azim_fan=0.0,dist_fan=0.0,coma_fac=0.0):
# self.F_GRATING = 1
# self.F_RULING = 3
# self.AZIM_FAN = azim_fan
# self.DIST_FAN = dist_fan
# self.COMA_FAC = coma_fac
# return self
#
# def setGratingReserved(self):
# self.F_GRATING = 1
# self.F_RULING = 4
# return self
#
# def setGratingPolynomial(self,poly_params=numpy.zeros(5,dtype=numpy.float64),f_rul_abs=0):
# self.F_GRATING = 1
# self.F_RULING = 5
# self.F_RUL_ABS = f_rul_abs
# self.RULING = poly_params[0]
# self.RUL_A1 = poly_params[1]
# self.RUL_A2 = poly_params[2]
# self.RUL_A3 = poly_params[3]
# self.RUL_A4 = poly_params[4]
# return self
#
# def setMosaic(self,mosaic_seed=4732093,spread_mos=0.0,thickness=0.0):
# self.F_MOSAIC = 1
# self.MOSAIC_SEED = mosaic_seed
# self.SPREAD_MOS = spread_mos
# self.THICKNESS = thickness
# return self
#
# def setAutoTuning(self,f_phot_cent=0,phot_cent=5000.0,r_lambda=100.0):
# self.F_CENTRAL = 1
# self.F_PHOT_CENT = f_phot_cent
# self.PHOT_CENT = phot_cent
# self.R_LAMBDA = r_lambda
# return self
#
# def setAutoMonochromator(self,f_phot_cent=0,phot_cent=5000.0,r_lambda=100.0,f_mono=0,f_hunt=1,hparam=numpy.zeros(3,dtype=numpy.float64)):
# self.setAutoTuning(f_phot_cent=f_phot_cent,phot_cent=phot_cent,r_lambda=r_lambda)
# self.F_MONO = f_mono
# self.F_HUNT = f_hunt
# self.HUNT_H = hparam[0]
# self.HUNT_L = hparam[1]
# self.BLAZE = hparam[2]
# return self
#
# #
# # TODO: REMOVE END HERE
# #
def to_dictionary(self):
mem = inspect.getmembers(self)
mydict = {}
for i,var in enumerate(mem):
if var[0].isupper():
mydict[var[0]]= var[1]
return(mydict)
# def duplicate(self):
# oe_new = OE()
# mem = inspect.getmembers(self)
# for i,var in enumerate(mem):
# if var[0].isupper():
# tmp = getattr(self,var[0])
# setattr(oe_new,var[0],var[1])
# return(oe_new)
def duplicate(self):
new_oe = OE()
new_oe.FMIRR = self.FMIRR
new_oe.F_TORUS = self.F_TORUS
new_oe.FCYL = self.FCYL
new_oe.F_EXT = self.F_EXT
new_oe.FSTAT = self.FSTAT
new_oe.F_SCREEN = self.F_SCREEN
new_oe.F_PLATE = self.F_PLATE
new_oe.FSLIT = self.FSLIT
new_oe.FWRITE = self.FWRITE
new_oe.F_RIPPLE = self.F_RIPPLE
new_oe.F_MOVE = self.F_MOVE
new_oe.F_THICK = self.F_THICK
new_oe.F_BRAGG_A = self.F_BRAGG_A
new_oe.F_G_S = self.F_G_S
new_oe.F_R_RAN = self.F_R_RAN
new_oe.F_GRATING = self.F_GRATING
new_oe.F_MOSAIC = self.F_MOSAIC
new_oe.F_JOHANSSON = self.F_JOHANSSON
new_oe.F_SIDE = self.F_SIDE
new_oe.F_CENTRAL = self.F_CENTRAL
new_oe.F_CONVEX = self.F_CONVEX
new_oe.F_REFLEC = self.F_REFLEC
new_oe.F_RUL_ABS = self.F_RUL_ABS
new_oe.F_RULING = self.F_RULING
new_oe.F_PW = self.F_PW
new_oe.F_PW_C = self.F_PW_C
new_oe.F_VIRTUAL = self.F_VIRTUAL
new_oe.FSHAPE = self.FSHAPE
new_oe.FHIT_C = self.FHIT_C
new_oe.F_MONO = self.F_MONO
new_oe.F_REFRAC = self.F_REFRAC
new_oe.F_DEFAULT = self.F_DEFAULT
new_oe.F_REFL = self.F_REFL
new_oe.F_HUNT = self.F_HUNT
new_oe.F_CRYSTAL = self.F_CRYSTAL
new_oe.F_PHOT_CENT = self.F_PHOT_CENT
new_oe.F_ROUGHNESS = self.F_ROUGHNESS
new_oe.F_ANGLE = self.F_ANGLE
new_oe.NPOINT = self.NPOINT
new_oe.NCOL = self.NCOL
new_oe.N_SCREEN = self.N_SCREEN
new_oe.ISTAR1 = self.ISTAR1
new_oe.CIL_ANG = self.CIL_ANG
new_oe.ELL_THE = self.ELL_THE
new_oe.N_PLATES = self.N_PLATES
new_oe.IG_SEED = self.IG_SEED
new_oe.MOSAIC_SEED = self.MOSAIC_SEED
new_oe.ALPHA = self.ALPHA
new_oe.SSOUR = self.SSOUR
new_oe.THETA = self.THETA
new_oe.SIMAG = self.SIMAG
new_oe.RDSOUR = self.RDSOUR
new_oe.RTHETA = self.RTHETA
new_oe.OFF_SOUX = self.OFF_SOUX
new_oe.OFF_SOUY = self.OFF_SOUY
new_oe.OFF_SOUZ = self.OFF_SOUZ
new_oe.ALPHA_S = self.ALPHA_S
new_oe.RLEN1 = self.RLEN1
new_oe.RLEN2 = self.RLEN2
new_oe.RMIRR = self.RMIRR
new_oe.AXMAJ = self.AXMAJ
new_oe.AXMIN = self.AXMIN
new_oe.CONE_A = self.CONE_A
new_oe.R_MAJ = self.R_MAJ
new_oe.R_MIN = self.R_MIN
new_oe.RWIDX1 = self.RWIDX1
new_oe.RWIDX2 = self.RWIDX2
new_oe.PARAM = self.PARAM
new_oe.HUNT_H = self.HUNT_H
new_oe.HUNT_L = self.HUNT_L
new_oe.BLAZE = self.BLAZE
new_oe.RULING = self.RULING
new_oe.ORDER = self.ORDER
new_oe.PHOT_CENT = self.PHOT_CENT
new_oe.X_ROT = self.X_ROT
new_oe.D_SPACING = self.D_SPACING
new_oe.A_BRAGG = self.A_BRAGG
new_oe.SPREAD_MOS = self.SPREAD_MOS
new_oe.THICKNESS = self.THICKNESS
new_oe.R_JOHANSSON = self.R_JOHANSSON
new_oe.Y_ROT = self.Y_ROT
new_oe.Z_ROT = self.Z_ROT
new_oe.OFFX = self.OFFX
new_oe.OFFY = self.OFFY
new_oe.OFFZ = self.OFFZ
new_oe.SLLEN = self.SLLEN
new_oe.SLWID = self.SLWID
new_oe.SLTILT = self.SLTILT
new_oe.COD_LEN = self.COD_LEN
new_oe.COD_WID = self.COD_WID
new_oe.X_SOUR = self.X_SOUR
new_oe.Y_SOUR = self.Y_SOUR
new_oe.Z_SOUR = self.Z_SOUR
new_oe.X_SOUR_ROT = self.X_SOUR_ROT
new_oe.Y_SOUR_ROT = self.Y_SOUR_ROT
new_oe.Z_SOUR_ROT = self.Z_SOUR_ROT
new_oe.R_LAMBDA = self.R_LAMBDA
new_oe.THETA_I = self.THETA_I
new_oe.ALPHA_I = self.ALPHA_I
new_oe.T_INCIDENCE = self.T_INCIDENCE
new_oe.T_SOURCE = self.T_SOURCE
new_oe.T_IMAGE = self.T_IMAGE
new_oe.T_REFLECTION = self.T_REFLECTION
#TODO due to an incomprehensible bug, the FILE_SOURCE variable in oe
#changes from len=1024 to len=2048 when shadow runs. Therefore gives
#a crash when copying to a new object. With this we force the dimension
new_oe.FILE_SOURCE = self.FILE_SOURCE[:1023]
new_oe.FILE_RIP = self.FILE_RIP
new_oe.FILE_REFL = self.FILE_REFL
new_oe.FILE_MIR = self.FILE_MIR
new_oe.FILE_ROUGH = self.FILE_ROUGH
new_oe.FZP = self.FZP
new_oe.HOLO_R1 = self.HOLO_R1
new_oe.HOLO_R2 = self.HOLO_R2
new_oe.HOLO_DEL = self.HOLO_DEL
new_oe.HOLO_GAM = self.HOLO_GAM
new_oe.HOLO_W = self.HOLO_W
new_oe.HOLO_RT1 = self.HOLO_RT1
new_oe.HOLO_RT2 = self.HOLO_RT2
new_oe.AZIM_FAN = self.AZIM_FAN
new_oe.DIST_FAN = self.DIST_FAN
new_oe.COMA_FAC = self.COMA_FAC
new_oe.ALFA = self.ALFA
new_oe.GAMMA = self.GAMMA
new_oe.R_IND_OBJ = self.R_IND_OBJ
new_oe.R_IND_IMA = self.R_IND_IMA
new_oe.R_ATTENUATION_OBJ = self.R_ATTENUATION_OBJ
new_oe.R_ATTENUATION_IMA = self.R_ATTENUATION_IMA
new_oe.F_R_IND = self.F_R_IND
new_oe.FILE_R_IND_OBJ = self.FILE_R_IND_OBJ
new_oe.FILE_R_IND_IMA = self.FILE_R_IND_IMA
new_oe.RUL_A1 = self.RUL_A1
new_oe.RUL_A2 = self.RUL_A2
new_oe.RUL_A3 = self.RUL_A3
new_oe.RUL_A4 = self.RUL_A4
new_oe.F_POLSEL = self.F_POLSEL
new_oe.F_FACET = self.F_FACET
new_oe.F_FAC_ORIENT = self.F_FAC_ORIENT
new_oe.F_FAC_LATT = self.F_FAC_LATT
new_oe.RFAC_LENX = self.RFAC_LENX
new_oe.RFAC_LENY = self.RFAC_LENY
new_oe.RFAC_PHAX = self.RFAC_PHAX
new_oe.RFAC_PHAY = self.RFAC_PHAY
new_oe.RFAC_DELX1 = self.RFAC_DELX1
new_oe.RFAC_DELX2 = self.RFAC_DELX2
new_oe.RFAC_DELY1 = self.RFAC_DELY1
new_oe.RFAC_DELY2 = self.RFAC_DELY2
new_oe.FILE_FAC = self.FILE_FAC
new_oe.F_SEGMENT = self.F_SEGMENT
new_oe.ISEG_XNUM = self.ISEG_XNUM
new_oe.ISEG_YNUM = self.ISEG_YNUM
new_oe.FILE_SEGMENT = self.FILE_SEGMENT
new_oe.FILE_SEGP = self.FILE_SEGP
new_oe.SEG_LENX = self.SEG_LENX
new_oe.SEG_LENY = self.SEG_LENY
new_oe.F_KOMA = self.F_KOMA
new_oe.FILE_KOMA = self.FILE_KOMA
new_oe.F_EXIT_SHAPE = self.F_EXIT_SHAPE
new_oe.F_INC_MNOR_ANG = self.F_INC_MNOR_ANG
new_oe.ZKO_LENGTH = self.ZKO_LENGTH
new_oe.RKOMA_CX = self.RKOMA_CX
new_oe.RKOMA_CY = self.RKOMA_CY
new_oe.F_KOMA_CA = self.F_KOMA_CA
new_oe.FILE_KOMA_CA = self.FILE_KOMA_CA
new_oe.F_KOMA_BOUNCE = self.F_KOMA_BOUNCE
new_oe.X_RIP_AMP = self.X_RIP_AMP
new_oe.X_RIP_WAV = self.X_RIP_WAV
new_oe.X_PHASE = self.X_PHASE
new_oe.Y_RIP_AMP = self.Y_RIP_AMP
new_oe.Y_RIP_WAV = self.Y_RIP_WAV
new_oe.Y_PHASE = self.Y_PHASE
new_oe.N_RIP = self.N_RIP
new_oe.ROUGH_X = self.ROUGH_X
new_oe.ROUGH_Y = self.ROUGH_Y
new_oe.OE_NUMBER = self.OE_NUMBER
new_oe.IDUMMY = self.IDUMMY
new_oe.DUMMY = self.DUMMY
new_oe.CX_SLIT = copy.deepcopy(self.CX_SLIT)
new_oe.CZ_SLIT = copy.deepcopy(self.CZ_SLIT)
new_oe.D_PLATE = copy.deepcopy(self.D_PLATE)
new_oe.FILE_ABS = copy.deepcopy(self.FILE_ABS)
new_oe.FILE_SCR_EXT = copy.deepcopy(self.FILE_SCR_EXT)
new_oe.I_ABS = copy.deepcopy(self.I_ABS)
new_oe.I_SCREEN = copy.deepcopy(self.I_SCREEN)
new_oe.I_SLIT = copy.deepcopy(self.I_SLIT)
new_oe.I_STOP = copy.deepcopy(self.I_STOP)
new_oe.K_SLIT = copy.deepcopy(self.K_SLIT)
new_oe.RX_SLIT = copy.deepcopy(self.RX_SLIT)
new_oe.RZ_SLIT = copy.deepcopy(self.RZ_SLIT)
new_oe.SCR_NUMBER = copy.deepcopy(self.SCR_NUMBER)
new_oe.SL_DIS = copy.deepcopy(self.SL_DIS)
new_oe.THICK = copy.deepcopy(self.THICK)
new_oe.CCC = copy.deepcopy(self.CCC)
return new_oe
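# Illustrative sketch (not part of the method above): duplicate() returns an
# independent copy, so a template element can be reused with only a few
# attributes changed. The variable names oe_template/oe_tilted are hypothetical.
#
#   oe_tilted = oe_template.duplicate()
#   oe_tilted.T_INCIDENCE = 88.5
#   oe_tilted.T_REFLECTION = 88.5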
def mirinfo(self, title=None):
'''
mimics SHADOW mirinfo postprocessor. Returns a text string.
:return: a text string with the result
'''
#
txt = ''
type1 = {}
type1['1'] = 'SPHERICAL '
type1['2'] = 'ELLIPTICAL '
type1['3'] = 'TOROIDAL '
type1['4'] = 'PARABOLICAL '
type1['5'] = 'PLANE '
type1['6'] = 'CODLING SLIT'
type1['7'] = 'HYPERBOLICAL'
type1['8'] = 'CONICAL '
type1['9'] = 'POLYNOMIAL '
type1['10'] = 'CONIC EXTNAL'
type1['11'] = ' '
type1['12'] = ' '
TOPLIN = '+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n'
T20 = ' '
T60 = T20 + T20 + T20
txt += TOPLIN
txt += '******************** MIRROR DESCRIPTION ********************\n'
if title == None:
txt += '\n\n'
else:
txt += title+'\n'
txt += TOPLIN
txt += 'Surface figure was defined as: %s \n'%(type1[str(self.FMIRR)])
if self.FCYL == 0:
txt += 'Cylindrical figure NO\n'
else:
txt += 'Cylindrical figure YES\n'
txt += 'Cylinder axis angle from X-axis %f \n'%(self.CIL_ANG*180.0/numpy.pi)
if self.F_ROUGHNESS == 1:
txt += 'Roughness on from '+self.FILE_ROUGH.strip().decode()+'\n'
txt += 'RMS in Y (angstroms) %f \n'%(self.ROUGH_Y)
txt += 'RMS in X (angstroms) %f \n'%(self.ROUGH_X)
if self.F_REFRAC == 0:
txt += 'Element type REFLECTOR\n'
else:
txt += 'Element type REFRACTOR\n'
if ((self.F_GRATING == 0) and (self.F_CRYSTAL ==0)):
if self.F_FACET == 1:
txt += 'Element type Faceted Mirror\n'
txt += 'Facet size (X) %f\n'%(self.RFAC_LENX)
txt += 'Facet size (Y) %f\n'%(self.RFAC_LENY)
txt += 'Facet polynomial from %s\n'%(self.FILE_FAC.strip().decode())
if self.F_POLSEL == 3:
txt += 'Intercept used CLOSEST\n'
if self.F_POLSEL == 2:
txt += 'Intercept used 2nd CLOSEST\n'
if self.F_POLSEL == 1:
txt += 'Intercept used 2nd FARTHEST\n'
if self.F_POLSEL == 4:
txt += 'Intercept used FARTHEST\n'
if self.F_KOMA == 1:
txt += 'Element type Multi-bounce Tube Array\n'
if self.F_KOMA_CA == 1:
txt += 'Parameters from %s\n'%(self.FILE_KOMA_CA.strip().decode())
txt += 'Tube radii specified as (r(Z))**2\n'
else:
txt += 'Parameters from %s\n'%(self.FILE_KOMA.strip().decode())
txt += 'Tube radii specified as r(Z)\n'
if ((self.F_GRATING == 0) and (self.F_CRYSTAL == 1)):
txt += 'Element type CRYSTAL\n'
txt += 'Lattice Spacing %f\n'%(self.D_SPACING)
txt += 'Bragg Reflection from %s\n'%(self.FILE_REFL.strip().decode())
if self.F_MOSAIC == 1:
txt += 'MOSAIC Crystal selected \n'
txt += 'Mosaic crystal spread (st. dev) [DEG] %f\n'%(self.SPREAD_MOS*180.0/numpy.pi)
txt += 'Mosaic crystal thickness [cm] %f\n'%(self.THICKNESS)
else:
if self.F_BRAGG_A == 1:
txt += 'Asymmetric Cut angle [DEG] %f\n'%(self.A_BRAGG*180.0/numpy.pi)
if self.F_JOHANSSON == 1:
txt += 'JOHANSSON Geometry selected \n'
txt += 'Johansson radius %f\n'%(self.R_JOHANSSON)
if self.F_GRATING == 1:
if self.FZP == 1:
txt += 'Element type Fresnel Zone Plate\n'
txt += 'Element type GRATING\n'
txt += 'Order chosen ( inside are < 0 ) %d\n'%(self.ORDER)
if self.F_CENTRAL == 1:
txt += 'Automatic Tuning YES\n'
if ((self.F_MONO == 0) and (self.F_CRYSTAL == 0)):
txt += 'Mount SEYA / TGM\n'
if ((self.F_MONO == 0) and (self.F_CRYSTAL == 1)):
txt += 'Mount BRAGG\n'
if ((self.F_MONO == 1) and (self.F_CRYSTAL == 0)):
txt += 'Mount ERG\n'
if ((self.F_MONO == 2) and (self.F_CRYSTAL == 0)):
txt += 'Mount Const. INCIDENCE\n'
if ((self.F_MONO == 3) and (self.F_CRYSTAL == 0)):
txt += 'Mount Const. DIFFRACTION\n'
if ((self.F_MONO == 4) and (self.F_CRYSTAL == 0)):
txt += 'Mount Const. BLAZE\n'
if (self.F_RULING == 0) and (self.F_CRYSTAL == 0):
txt += 'Constant ruling [ lines/cm ] %f\n'%(self.RULING)
if (self.F_RULING == 1) and (self.F_CRYSTAL == 0):
txt += 'Uniform ruling. At pole [ lines/cm ] %f\n'%(self.RULING)
if (self.F_RULING == 2) and (self.F_CRYSTAL == 0):
txt += 'Holographic grating. Recording Wavelength: %f\n'%(self.RULING)
txt += 'Input Slit Dist.'+T20+'Exit Slit Dist.'+T20+'Input Slit Angle'+T20+'Exit Slit Angle\n'
txt += '%16.9g'%(self.HOLO_R1)+'%16.9g'%(self.HOLO_R2)+'%16.9g'%(self.HOLO_DEL)+'%16.9g'%(self.HOLO_GAM)+'\n'
txt += 'Input Slit rotation angle %f \n'%(self.HOLO_RT1*180.0/numpy.pi)
txt += 'Output Slit rotation angle %f \n'%(self.HOLO_RT2*180.0/numpy.pi)
if (self.F_PW == 0): txt += 'Spherical / Spherical\n'
if (self.F_PW == 1): txt += 'Plane / Spherical\n'
if (self.F_PW == 2): txt += 'Spherical / Plane\n'
if (self.F_PW == 3): txt += 'Plane / Plane\n'
if (self.F_PW_C == 0): txt += 'Spherical / Spherical\n'
if (self.F_PW_C == 1): txt += 'Cylindrical / Spherical\n'
if (self.F_PW_C == 2): txt += 'Spherical / Cylindrical\n'
if (self.F_PW_C == 3): txt += 'Cylindrical / Cylindrical\n'
if (self.F_VIRTUAL == 0): txt += 'Real / Real\n'
if (self.F_VIRTUAL == 1): txt += 'Real / Virtual\n'
if (self.F_VIRTUAL == 2): txt += 'Virtual / Real\n'
if (self.F_VIRTUAL == 3): txt += 'Virtual / Virtual\n'
if (self.F_RULING == 5) and (self.F_CRYSTAL == 0):
txt += 'Mechanically ruled grating. Polynomial Coefficients: \n'
txt += 'Zero order term Coefficient: %f\n'%(self.RULING)
txt += 'First %f\n'%(self.RUL_A1)
txt += 'Second %f\n'%(self.RUL_A2)
txt += 'Third %f\n'%(self.RUL_A3)
txt += 'Fourth %f\n'%(self.RUL_A4)
if (self.F_RULING == 3) and (self.F_CRYSTAL == 0):
txt += 'Oriental fan type grating.\n'
txt += 'Fan pole angle from Y axis %f\n'%(self.AZIM_FAN)
txt += ' distance from grating pole %f\n'%(self.DIST_FAN)
txt += 'Coma correction factor %f\n'%(self.COMA_FAC)
txt += 'Line density at grating pole %f\n'%(self.RULING)
if self.F_REFRAC == 1:
txt += 'Relative Index of Refraction %f\n'%(self.ALFA)
if self.F_REFLEC == 0:
txt += 'Reflectivity OFF\n'
else:
if self.F_REFL == 0:
txt += 'Reflectivity ON coefficients from: %s\n'%(self.FILE_REFL.strip().decode())
if self.F_REFL == 1:
txt += 'Reflectivity ON coefficients from TT:\n'
if self.F_REFL == 2:
txt += 'Multilayer ON coefficients and geometry from : %s\n'%(self.FILE_REFL.strip().decode())
if self.F_REFLEC == 1: txt += 'Polarization dependence YES\n'
if self.F_REFLEC == 2: txt += 'Polarization dependence NO\n'
if self.FHIT_C == 0:
txt += 'Mirror dimensions UNLIMITED\n'
else:
if self.FSHAPE == 0:
txt += 'Invalid o.e. dimensions ( FSHAPE=0 )\n'
if self.FSHAPE == 1:
txt += 'Mirror dimensions ( rectangular ):\n'
txt += ' X plus: %f, X minus: %f, Y plus: %f, Y minus: %f\n'%\
(self.RWIDX1,self.RWIDX2,self.RLEN1,self.RLEN2)
if self.FSHAPE == 2:
txt += 'Mirror dimensions ( elliptical ) :\n'
txt += ' Major Axis: %f, Minor axis: %f \n'%\
(self.RWIDX2,self.RLEN2)
if self.FSHAPE == 3:
txt += 'Mirror dimensions ( elliptical + hole )\n'
txt += 'A. Outside border: %f, %f\n'%\
(self.RWIDX2,self.RLEN2)
txt += 'A. Inner border: %f, %f\n'%\
(self.RWIDX1,self.RLEN1)
txt += TOPLIN
txt += 'Central Axis parameters :\n'
txt += 'Source Plane Distance %f\n'%(self.T_SOURCE)
txt += 'Image Plane %f\n'%(self.T_IMAGE)
txt += 'Incidence Angle %f\n'%(self.T_INCIDENCE*180.0/numpy.pi)
txt += 'Reflection/Diffraction Angle %f\n'%(self.T_REFLECTION*180.0/numpy.pi)
if self.F_EXT == 1:
txt += 'Mirror parameters EXTERNAL\n'
else:
if self.FMIRR != 10:
txt += 'Mirror parameters COMPUTED\n'
if self.F_DEFAULT == 1:
txt += 'Same configuration as Central Axis YES\n'
else:
txt += 'Same configuration as Central Axis NO\n'
txt += 'Objective focus at %f\n'%(self.SSOUR)
txt += 'Image focus at %f\n'%(self.SIMAG)
txt += 'Incidence angle %f\n'%(self.THETA*180.0/numpy.pi)
txt += 'Parameters used follow:\n'
if self.FMIRR == 1:
txt += 'Spherical Radius %f\n'%(self.RMIRR)
if self.FMIRR == 2:
ECCENT = numpy.sqrt(self.AXMAJ**2-self.AXMIN**2)/self.AXMAJ
txt += ' Semi-major axis %f\n'%(self.AXMAJ)
txt += ' Semi-minor axis %f\n'%(self.AXMIN)
txt += ' Semi-focal-length %f\n'%(numpy.sqrt(self.AXMAJ**2-self.AXMIN**2))
txt += ' Eccentricity %f\n'%(ECCENT)
if self.FMIRR == 3:
txt += ' Major Radius (optical) %f\n'%(self.R_MAJ+self.R_MIN)
txt += ' Minor Radius %f\n'%(self.R_MIN)
if self.FMIRR == 4:
txt += ' Parabola Param. %f\n'%(self.PARAM)
if self.FMIRR == 5:
txt += ' Plane mirror \n'
if self.FMIRR == 6:
txt += ' Codling Slit\n'
if self.FMIRR == 7:
AFOCI = numpy.sqrt(self.AXMIN**2+self.AXMAJ**2)
ECCENT = AFOCI/numpy.abs(self.AXMAJ)
txt += ' Semi-major axis %f\n'%(self.AXMAJ)
txt += ' Semi-minor axis %f\n'%(self.AXMIN)
txt += ' Semi-focal-length %f\n'%(AFOCI)
txt += ' Eccentricity %f\n'%(ECCENT)
if self.FMIRR == 8:
txt += ' Cone half-angle %f\n'%(self.CONE_A*180.0/numpy.pi)
if self.FMIRR == 9:
txt += ' Polynomial Coeff file %s\n'%(self.FILE_MIR.strip().decode())
if self.FSTAT == 0:
txt += 'Source of this O.E. moved NO\n'
else:
txt += 'Source of this O.E. moved YES\n'
txt += 'In SOURCE reference frame: \n'
txt += 'Source Movement X: %f\n'%(self.X_SOUR)
txt += ' Y: %f\n'%(self.Y_SOUR)
txt += ' Z: %f\n'%(self.Z_SOUR)
txt += 'Source rot at X: %f\n'%(self.X_SOUR_ROT*180.0/numpy.pi)
txt += ' Y: %f\n'%(self.Y_SOUR_ROT*180.0/numpy.pi)
txt += ' Z: %f\n'%(self.Z_SOUR_ROT*180.0/numpy.pi)
txt += 'In MIRROR reference frame: \n'
txt += 'Source distance %f\n'%(self.RDSOUR)
txt += ' rotation %f\n'%(self.ALPHA_S*180.0/numpy.pi)
txt += 'Incidence angle %f\n'%(self.RTHETA*180.0/numpy.pi)
txt += 'Source offset X: %f\n'%(self.OFF_SOUX)
txt += ' Y: %f\n'%(self.OFF_SOUY)
txt += ' Z: %f\n'%(self.OFF_SOUZ)
if self.F_MOVE == 0:
txt += 'Mirror at pole position ( no mov. ) YES\n'
else:
txt += 'Mirror moved from pole. Parameters :\n'
txt += 'Displacement along X: %f\n'%(self.OFFX)
txt += ' Y: %f\n'%(self.OFFY)
txt += ' Z: %f\n'%(self.OFFZ)
txt += 'Rotation around X: %f\n'%(self.X_ROT*180.0/numpy.pi)
txt += ' Y: %f\n'%(self.Y_ROT*180.0/numpy.pi)
txt += ' Z: %f\n'%(self.Z_ROT*180.0/numpy.pi)
if ( (self.FMIRR == 1) or (self.FMIRR == 2) or (self.FMIRR == 4) or\
(self.FMIRR == 5) or (self.FMIRR == 7) or (self.FMIRR == 8) or\
(self.FMIRR == 9) or (self.FMIRR == 10) ):
txt += '\n'+TOPLIN
txt += 'OE surface in form of conic equation: \n'
txt += ' c[1]*X^2 + c[2]*Y^2 + c[3]*Z^2 + \n'
txt += ' c[4]*X*Y + c[5]*Y*Z + c[6]*X*Z + \n'
txt += ' c[7]*X + c[8]*Y + c[9]*Z + c[10] = 0 \n'
txt += ' with \n'
for i in range(10):
txt += ' c[%d]= %f\n'%(i,self.CCC[i])
txt += TOPLIN
txt += '*************** E N D ***************\n'
txt += TOPLIN
return txt
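# Illustrative sketch: mirinfo() only builds and returns the text, so the caller
# decides whether to print it or write it to disk. The variable oe1 and the
# file name below are assumptions, not something mirinfo() does by itself.
#
#   report = oe1.mirinfo(title="first optical element")
#   print(report)
#   # open("mirinfo.01.txt", "w").write(report)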
class CompoundOE():
def __init__(self,list=None, name=''):
if list == None:
self.list = []
else:
self.list = list
self.name = name
def set_name(self,name):
self.name = name
def number_oe(self):
return len(self.list)
def info(self,file=''):
"""
write a summary of the real distances, focal distances and orientation angles.
:param file: set to a file name to dump the result into it
:return: a text array
"""
# print("CompoundOE name: %s, found %d elements"%(self.name,self.number_oe()))
# for i,j in enumerate(self.list):
# print('oe %d, p=%f, q=%f'%(1+i,j.T_SOURCE,j.T_IMAGE))
txt = ' ******** SUMMARY OF DISTANCES ********\n'
txt += ' ** DISTANCES FOR ALL O.E. [cm] ** \n'
txt += "%12s %12s %12s %12s %12s %12s \n"%('OE','TYPE','p[cm]','q[cm]','src-oe','src-screen')
tot=0.0
alphatot=0.0
deflection_H = 0.0
deflection_V = 0.0
pihalf = numpy.pi/2
txt1 = ''
txt2 = ''
oeshape = '?'
for i,oe in enumerate(self.list):
#1) Distances summary
oetype = 'UNKNOWN'
if oe.F_REFRAC == 1:
oetype = 'REFRACTOR'
else:
oetype = 'MIRROR'
if oe.F_CRYSTAL == 1: oetype = 'CRYSTAL'
if oe.F_GRATING == 1: oetype = 'GRATING'
if oe.F_REFRAC == 2: oetype = 'EMPTY'
tot = tot + oe.T_SOURCE + oe.T_IMAGE
totoe = tot - oe.T_IMAGE
line="%12d %12s %12.2f %12.2f %12.2f %12.2f \n"%(i+1,oetype,oe.T_SOURCE,oe.T_IMAGE,totoe,tot)
txt1 += line
# 2) focusing summary
if oe.FMIRR != 5 and oe.FMIRR != 9:
if oe.FMIRR == 1:
if oe.FCYL == 0:
oeshape='SPHERE'
else:
oeshape='CYLINDER'
if oe.FMIRR == 2:
if oe.FCYL == 0:
oeshape='ELLIPSOID'
else:
oeshape='ELLIPSE'
if oe.FMIRR == 3:
oeshape='TOROID'
if oe.FMIRR == 4:
if oe.FCYL == 0:
oeshape='PARABOLOID'
else:
oeshape='PARABOLA'
if oe.FMIRR == 6:
oeshape='CODLING SLIT'
if oe.FMIRR == 7:
if oe.FCYL == 0:
oeshape='HYPERBOLOID'
else:
oeshape='HYPERBOLA'
if oe.FMIRR == 8:
oeshape='CONE'
if oe.FMIRR == 9:
oeshape='POLYNOMIAL'
if oe.FMIRR == 10:
oeshape='CONIC COEFF'
if oe.F_DEFAULT == 1:
pp = oe.T_SOURCE
qq = oe.T_IMAGE
else:
pp = oe.SSOUR
qq = oe.SIMAG
if oe.F_EXT == 1:
line = "%10d %10s %10s %10s %10s \n"%( i+1,oeshape,'?','?','?')
else:
line = "%10d %10s %10.2f %10.2f %10.2f \n)'"%(i+1,oeshape,pp,qq,pp/qq)
txt2 += line
# 3) total deflection
alphatot = alphatot + oe.ALPHA
if oe.IDUMMY == 0: # oe not changed by shadow, angles in deg
torad = numpy.pi/180.0
else:
torad = 1.0
deflection_H = deflection_H + numpy.sin(alphatot*torad) * ( (pihalf-oe.T_INCIDENCE*torad) + (pihalf-oe.T_REFLECTION*torad) )
deflection_V = deflection_V + numpy.cos(alphatot*torad) * ( (pihalf-oe.T_INCIDENCE*torad) + (pihalf-oe.T_REFLECTION*torad) )
txt += txt1
txt += '\n'
txt += ' ** FOCUSING ELEMENTS ** \n'
# focusing elements
line = "%10s %10s %10s %10s %10s \n"%('OE','SHAPE','p_foc','q_foc','1/M')
txt += line
txt += txt2
txt += '\n'
line = 'Sum of Alphas %f \n'%(alphatot)
txt += line
line = 'Sum of Alphas Mod 180 deg %f \n'%( numpy.mod(alphatot*torad*180/numpy.pi,180))
txt += line
line = 'Sum of Alphas Mod 360 deg %f \n'%( numpy.mod(alphatot*torad*180/numpy.pi,360))
txt += line
txt += '\n'
if oe.IDUMMY != 1:
txt += "**Warning: SHADOW did not run, therefore autosetting angles are not considered**"
line = 'Total deflection angle H = %12.6f rad = %9.3f deg\n'%(deflection_H*torad,deflection_H*torad*180/numpy.pi)
txt += line
line = 'Total deflection angle V = %12.6f rad = %9.3f deg \n'%(deflection_V*torad,deflection_V*torad*180/numpy.pi)
txt += line
if file != '':
f = open(file,mode='w')
for line in txt:
f.write(line)
f.close()
print("File written to disk (compoundOE summary): ",file)
return(txt)
def mirinfo(self):
"""
Mimics the SHADOW mirinfo
:return: a text array
"""
txt = ""
for i,oe in enumerate(self.list):
txt += oe.mirinfo(title="oe %d in compoundOE name: %s "%(i+1,self.name))
return txt
def get_oe_index(self,oe_index):
"""
returns the pointer to the oe with index oe_index
:param oe_index:
:return:
"""
if oe_index >= self.number_oe():
print("Error returning element index %d : Not enough optical elements (max index=%d)"%(oe_index,self.number_oe()-1))
return None
else:
tmp = self.list[oe_index]
return tmp
def duplicate(self):
"""
Makes a copy of the compound optical element
:return:
"""
new_coe = CompoundOE()
new_coe.set_name(self.name)
for i,oe in enumerate(self.list):
tmp = oe.duplicate()
new_coe.append(tmp)
return new_coe
def add_drift_space_downstream(self,dd):
"""
Adds empty space to the last element of the compound oe
:param dd: The distance
:return: None
"""
self.list[-1].T_IMAGE += dd
def add_drift_space_upstream(self,dd):
"""
Adds empty space before the first element of the compound oe
:param dd: The distance
:return: None
"""
self.list[0].T_SOURCE += dd
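# Illustrative sketch: the two drift helpers simply lengthen T_SOURCE of the
# first element and T_IMAGE of the last one, so the total length grows by the
# sum of the two distances (user units). The variable coe is hypothetical and
# assumed to hold a non-empty CompoundOE.
#
#   coe.add_drift_space_upstream(30.0)
#   coe.add_drift_space_downstream(50.0)
#   print(coe.length())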
def append(self,item):
"""
append an instance of Shadow.OE or Shadow.CompoundOE
:param item: an OE or CompoundOE to append
:return: the CompoundOE updated with a copy of item appended
"""
if isinstance(item, OE):
self.list.append(item.duplicate())
return self
if isinstance(item, CompoundOE):
for i in range(item.number_oe()):
self.list.append(item.list[i].duplicate())
return self
print("Failed to append: object not understood: %s. "%type(item))
return self
def append_lens(self,p,q,surface_shape=1,convex_to_the_beam=1,diameter=None,cylinder_angle=None,\
prerefl_file=None, refraction_index=1.0, attenuation_coefficient=0.0,\
radius=500e-2,interthickness=0.001,use_ccc=0):
"""
Adds and sets a lens (two interfaces) to the compound optical element
:param p: distance source-first lens interface
:param q: distance last lens interface to image plane
:param surface_shape: 1=sphere 4=paraboloid, 5=plane (other surfaces not yet implemented)
:param convex_to_the_beam: convexity of the first interface exposed to the beam 0=No, 1=Yes
the second interface has opposite convexity
:param diameter: lens diameter. Set to None for infinite dimension
:param cylinder_angle: None=not cylindrical, 0=meridional 90=sagittal
:param prerefl_file:file name (from prerefl) to get the refraction index. If set
then the keywords refraction_index and attenuation_coefficient are not used.
:param refraction_index: n (real) #ignored if prerefl_file points to file.
:param attenuation_coefficient: mu (real); ignored if prerefl_file points to a file.
:param radius: lens radius (for spherical, or radius at the tip for paraboloid)
:param interthickness: lens thickness (distance between the two interfaces at the center of the lenses)
:param use_ccc: 0=set shadow using surface shape (FMIRR=1,4,5), 1=set shadow using CCC coeffs (FMIRR=10)
:return: self
"""
oe1 = OE()
oe2 = OE()
#set constant values for both interfaces
oe1.T_INCIDENCE = 0.0
oe1.T_REFLECTION = 180.0
oe2.T_INCIDENCE = 0.0
oe2.T_REFLECTION = 180.0
oe1.F_REFRAC = 1
oe2.F_REFRAC = 1
oe1.F_EXT = 1
oe2.F_EXT = 1
# write no output files. If wanted they are written by python in traceCompoundOE
oe1.FWRITE = 3
oe2.FWRITE = 3
if use_ccc:
oe1.FMIRR = 10
oe2.FMIRR = 10
else:
oe1.FMIRR = surface_shape
oe2.FMIRR = surface_shape
#set values that depend on the interface number
oe1.T_SOURCE = p
oe1.T_IMAGE = interthickness*0.5
oe2.T_SOURCE = interthickness*0.5
oe2.T_IMAGE = q
#refraction index
if prerefl_file != None and prerefl_file!= "":
oe1.F_R_IND = 2 #keyboard in object space, file in image space
oe1.R_IND_OBJ = 1.0
oe1.R_ATTENUATION_OBJ = 0.0
oe1.FILE_R_IND_IMA = prerefl_file.encode('utf-8')
oe2.F_R_IND = 1 #file in object space, keyboard in image space
oe2.FILE_R_IND_OBJ = prerefl_file.encode('utf-8')
oe2.R_IND_IMA = 1.0
oe2.R_ATTENUATION_IMA = 0.0
else:
oe1.F_R_IND = 0
oe1.R_IND_OBJ = 1.0
oe1.R_ATTENUATION_OBJ = 0.0
oe1.R_IND_IMA = refraction_index
oe1.R_ATTENUATION_IMA = attenuation_coefficient
oe2.F_R_IND = 0
oe2.R_IND_OBJ = refraction_index
oe2.R_ATTENUATION_OBJ = attenuation_coefficient
oe2.R_IND_IMA = 1.0
oe2.R_ATTENUATION_IMA = 0.0
#diameter
if diameter == None:
oe1.FHIT_C = 0
oe2.FHIT_C = 0
else:
#if diameter is scalar, set a round aperture
if isinstance(diameter,(int,float)):
oe1.FHIT_C = 1
oe2.FHIT_C = 1
oe1.FSHAPE = 2 #ellipse
oe2.FSHAPE = 2
oe1.RWIDX1 = 0.0
oe2.RWIDX1 = 0.0
oe1.RWIDX2 = diameter*0.5
oe2.RWIDX2 = diameter*0.5
oe1.RLEN1 = 0.0
oe2.RLEN1 = 0.0
oe1.RLEN2 = diameter*0.5
oe2.RLEN2 = diameter*0.5
#if diameter is a list or tuple, set a rectanglular aperture
else:
oe1.FHIT_C = 1
oe2.FHIT_C = 1
oe1.FSHAPE = 1 #rectangle
oe2.FSHAPE = 1
oe1.RWIDX1 = 0.5*diameter[0]
oe2.RWIDX1 = 0.5*diameter[0]
oe1.RWIDX2 = 0.5*diameter[0]
oe2.RWIDX2 = 0.5*diameter[0]
oe1.RLEN1 = 0.5*diameter[1]
oe2.RLEN1 = 0.5*diameter[1]
oe1.RLEN2 = 0.5*diameter[1]
oe2.RLEN2 = 0.5*diameter[1]
#radii
if surface_shape == 1: #spherical
oe1.RMIRR = radius
oe2.RMIRR = radius
oe1.CCC = numpy.array([1.0,1.0,1.0,0.0,0.0,0.0,0.0,0.0,-2.0*radius,0.0])
oe2.CCC = numpy.array([1.0,1.0,1.0,0.0,0.0,0.0,0.0,0.0,-2.0*radius,0.0])
if surface_shape == 4: #parabolical
oe1.PARAM = radius
oe2.PARAM = radius
oe1.CCC = numpy.array([1.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,-2.0*radius,0.0])
oe2.CCC = numpy.array([1.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,-2.0*radius,0.0])
if surface_shape != 1 and surface_shape != 4 and surface_shape != 5:
print("Error setting lens: surface shape not implemented")
if convex_to_the_beam == 1:
if use_ccc == 0:
oe1.F_CONVEX = 1
oe2.F_CONVEX = 0
else:
oe1.F_CONVEX = 0 # note that the needed changes are done here, nothing to do in shadow3
oe2.F_CONVEX = 0
oe1.CCC[4] = -oe1.CCC[4]
oe1.CCC[5] = -oe1.CCC[5]
oe1.CCC[8] = -oe1.CCC[8]
else:
if use_ccc == 0:
oe1.F_CONVEX = 0
oe2.F_CONVEX = 1
else:
oe1.F_CONVEX = 0
oe2.F_CONVEX = 0 # note that the needed changes are done here, nothing to do in shadow3
oe2.CCC[4] = -oe2.CCC[4]
oe2.CCC[5] = -oe2.CCC[5]
oe2.CCC[8] = -oe2.CCC[8]
if cylinder_angle == None:
oe1.FCYL = 0
oe2.FCYL = 0
else:
oe1.FCYL = 1
oe2.FCYL = 1
oe1.CIL_ANG = cylinder_angle
oe2.CIL_ANG = cylinder_angle
print("appending 2 elements")
self.append(oe1)
self.append(oe2)
return self
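# Illustrative sketch (values are arbitrary placeholders, not recommended
# defaults): a single lens with spherical interfaces and a 1 mm round aperture,
# defined directly by a refraction index instead of a prerefl file.
#
#   coe = CompoundOE(name="single lens")
#   coe.append_lens(100.0, 100.0, surface_shape=1, convex_to_the_beam=1,
#                   diameter=0.1, radius=0.05, interthickness=25e-4,
#                   refraction_index=0.999998, attenuation_coefficient=0.6)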
def append_crl(self,p0,q0, nlenses=30, slots_empty=0, radius=500e-2, thickness=625e-4, interthickness=0.001, \
surface_shape=1, convex_to_the_beam=1, diameter=None, cylinder_angle=None,\
prerefl_file=None, refraction_index=1.0, attenuation_coefficient=0.0,\
use_ccc=0):
"""
Builds the stack of oe for a CRL.
Notes: if nlenses=0 sets a single "lens" with flat interfaces and no change of refraction index (like empty)
The refraction index should be input either via i) the prerefl_file keyword or ii) the refraction_index and
attenuation_coefficient keywords. The first one takes priority.
slots_empty: if different from zero, adds a distance equal to thickness*slots_empty to q0. The
intention is to simulate a lens that is off but the path should be considered.
:param p0:distance source-first lens interface
:param q0:distance last lens interface to image plane
:param nlenses: number of lenses
:param slots_empty: number of empty slots (default=0)
:param radius:lens radius (for spherical, or radius at the tip for paraboloid)
:param thickness: lens thickness (piling thickness)
:param interthickness:lens thickness (distance between the two interfaces at the center of the lenses)
:param surface_shape:1=sphere 4=paraboloid, 5=plane (other surfaces not yet implemented)
:param convex_to_the_beam:convexity of the first interface exposed to the beam 0=No, 1=Yes
the second interface has opposite convexity
:param diameter:lens diameter. Set to None for infinite dimension
:param cylinder_angle:None=not cylindrical, 0=meridional 90=sagittal
:param prerefl_file:file name (from prerefl) to get the refraction index. If set
then the keywords refraction_index and attenuation_coefficient are not used.
:param refraction_index: n (real) #ignored if prerefl_file points to file.
:param attenuation_coefficient:mu (real); ignored if prerefl file points to file.
:param use_ccc:0=set shadow using surface shape (FMIRR=1,4,5), 1=set shadow using CCC coeffs (FMIRR=10)
:return: self
"""
p_or_q = 0.5*(thickness - interthickness)
if nlenses == 0: # add an empty lens + a distance (slots_empty-1) for keeping the total distance
pi = p0 + p_or_q
qi = q0 + p_or_q + max(slots_empty-1,0)*thickness
self.append_lens(pi, qi, surface_shape=5, \
interthickness=interthickness, \
refraction_index=1.0, attenuation_coefficient=0.0, \
use_ccc=use_ccc)
else:
for i in range(nlenses):
pi = p_or_q
qi = p_or_q
if i == 0:
pi += p0
if i == nlenses-1:
qi += q0 + slots_empty*thickness
self.append_lens(pi, qi, surface_shape=surface_shape, convex_to_the_beam=convex_to_the_beam,\
diameter=diameter, cylinder_angle=cylinder_angle, radius=radius,\
interthickness=interthickness, prerefl_file=prerefl_file, \
refraction_index=refraction_index, attenuation_coefficient=attenuation_coefficient, \
use_ccc=use_ccc)
return self
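# Illustrative sketch: a stack of 10 identical parabolic lenses; p0/q0 are the
# free spaces before the first and after the last interface. All numbers are
# placeholders in user (cm) units.
#
#   coe = CompoundOE(name="crl example")
#   coe.append_crl(3000.0, 190.0, nlenses=10, surface_shape=4, radius=0.03,
#                  thickness=0.0625, interthickness=25e-4,
#                  refraction_index=0.999997, attenuation_coefficient=24.4)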
def append_transfocator(self,p0,q0, nlenses=[4,8], slots_empty=0, radius=500e-2, thickness=625e-4, \
interthickness=0.001, \
surface_shape=1, convex_to_the_beam=1, diameter=None, cylinder_angle=None,\
prerefl_file=None, refraction_index=1.0, attenuation_coefficient=0.0,\
use_ccc=0):
"""
Builds the stack of oe for a TRANSFOCATOR. A transfocator is a stack of CRLs. append_transfocator
is therefore very similar to append_crl, but now arguments are lists instead of scalar. However,
if the value of a particular keyword is a scalar and a list is expected, then it is automatically
replicated "nslots" times, where nslots=len(nlenses)
Notes: if nlenses=0 sets a single "lens" with flat interfaces and no change of refraction index (like empty)
The refraction index should be input either via i) the prerefl_file keyword or ii) the refraction_index and
attenuation_coefficient keywords. The first one takes priority.
slots_empty: if different from zero, adds a distance equal to thickness*slots_empty to q0. The
intention is to simulate a lens that is off but the path should be considered.
Note that all arrays must be "list". If you are using numpy arrays, convert them: array.tolist()
:param p0 (list): distance from previous continuation plane to the first lens of each CRL
(usually [p,0,0,...])
:param q0 (list): distance from the last lens of each CRL to the continuation plane
:param nlenses (list): number of lenses
:param slots_empty (list): number of empty slots
:param radius (list):lens radius (for spherical, or radius at the tip for paraboloid)
:param thickness (list): lens thickness (piling thickness)
:param interthickness (list):lens thickness (distance between the two interfaces at the center of the lenses)
:param surface_shape (list):1=sphere 4=paraboloid, 5=plane (other surfaces not yet implemented)
:param convex_to_the_beam (list):convexity of the first interface exposed to the beam 0=No, 1=Yes
the second interface has opposite convexity
:param diameter (list):lens diameter. Set to None for infinite dimension
:param cylinder_angle (list):None=not cylindrical, 0=meridional 90=sagittal
:param prerefl_file (list):file name (from prerefl) to get the refraction index. If set
then the keywords refraction_index and attenuation_coefficient are not used.
:param refraction_index (list): n (real) #ignored if prerefl_file points to file.
:param attenuation_coefficient (list):mu (real); ignored if prerefl file points to file.
:param use_ccc (scalar):0=set shadow using surface shape (FMIRR=1,4,5), 1=set shadow using CCC coeffs (FMIRR=10)
:return: self
"""
# replicate inputs when they are scalar
nslots = len(nlenses)
if isinstance(p0, list) == False: p0 = [ p0 for i in range(nslots)]
if isinstance(q0, list) == False: q0 = [ q0 for i in range(nslots)]
if isinstance(slots_empty, list) == False: slots_empty = [ slots_empty for i in range(nslots)]
if isinstance(radius, list) == False: radius = [ radius for i in range(nslots)]
if isinstance(thickness, list) == False: thickness = [ thickness for i in range(nslots)]
if isinstance(interthickness, list) == False: interthickness = [ interthickness for i in range(nslots)]
if isinstance(surface_shape, list) == False: surface_shape = [ surface_shape for i in range(nslots)]
if isinstance(convex_to_the_beam, list) == False: convex_to_the_beam = [ convex_to_the_beam for i in range(nslots)]
if isinstance(diameter, list) == False: diameter = [ diameter for i in range(nslots)]
if isinstance(cylinder_angle, list) == False: cylinder_angle = [ cylinder_angle for i in range(nslots)]
if isinstance(prerefl_file, list) == False: prerefl_file = [ prerefl_file for i in range(nslots)]
if isinstance(refraction_index, list) == False: refraction_index = [ refraction_index for i in range(nslots)]
if isinstance(attenuation_coefficient, list) == False:
attenuation_coefficient = [ attenuation_coefficient for i in range(nslots)]
for i in range(len(nlenses)):
self.append_crl(p0[i], q0[i], nlenses=nlenses[i], slots_empty=slots_empty[i], \
radius=radius[i], thickness=thickness[i], interthickness=interthickness[i], \
surface_shape=surface_shape[i],convex_to_the_beam=convex_to_the_beam[i],\
diameter=diameter[i], cylinder_angle=cylinder_angle[i],\
prerefl_file=prerefl_file[i],refraction_index=refraction_index[i], \
attenuation_coefficient=attenuation_coefficient[i],\
use_ccc=0)
return self
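# Illustrative sketch: two CRL groups (4 and 8 lenses) stacked as a transfocator;
# scalar keywords are replicated to both groups automatically. Distances and
# radii below are placeholders in user (cm) units.
#
#   coe = CompoundOE(name="transfocator example")
#   coe.append_transfocator([5900.0, 0.0], [0.0, 950.0], nlenses=[4, 8],
#                           radius=[0.05, 0.1], thickness=0.3,
#                           interthickness=50e-4, surface_shape=4,
#                           refraction_index=0.999998,
#                           attenuation_coefficient=0.63)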
def append_kb(self, p0, q0, grazing_angles_mrad=[3.0,3.0],separation=100.0, mirror_orientation_angle=0,\
focal_positions=[0,0],shape=[2,2],\
dimensions1=[0,0],dimensions2=[0,0],\
reflectivity_kind=[0,0],reflectivity_files=["",""],surface_error_files=["",""]):
"""
Appends a KB (Kirkpatrick-Baez) system
First mirror is focusing in vertical plane, second mirror focusses in horizontal plane.
Note that SHADOW rotates the image plane because the second mirror has mirror orientation angle 90 deg
:param p0: distance from previous source plane (continuation plane) to center of KB (!!!)
:param q0: distance from center of KB (!!!) to image plane (continuation plane)
:param grazing_angles_mrad: grazing angle in mrad for both mirrors. Default: grazing_angles_mrad=[3.0,3.0]
:param separation: separation between the mirrors from center of first mirror (VFM) to center of second one (HFM).
(Default=100). The continuation plane is set in the middle.
:param mirror_orientation_angle: set the mirror orientation angle with respect to the previous o.e. for the
first mirror of the KB
:param focal_positions: the focal positions [p_focal,q_focal] measured from the center of KB.
If set to [0,0] then uses p0 and q0
:param shape: the shape code for the surfaces 1=spherical, 2=elliptical. Default: shape=[2,2]
:param dimensions1: the dimensions [width,length] for the first mirror. Default: [0,0] meaning infinite dimensions.
:param dimensions2: the dimensions [width,length] for the second mirror. Default: [0,0] meaning infinite dimensions.
:param reflectivity_kind: flag for reflectivity: 0=ideal reflector, 1=mirror, 2=multilayer. Default=[0,0]
If reflectivity is set to mirror or multilayer, the corresponding file must be entered in
the reflectivity_files keyword.
:param reflectivity_files: the reflectivity files, if needed. If mirror, the file must have been created
by prerefl. If multilayer, the file must come from pre_mlayer.
:param surface_error_files: Set to file names containing the surface error mesh.
Default: surface_error_files=["",""] which means that no surface error is considered.
:return: self
"""
oe1 = OE()
oe2 = OE()
#incidence angles
oe1.T_INCIDENCE = 90.0 - grazing_angles_mrad[0]*1e-3*180.0/numpy.pi
oe1.T_REFLECTION = oe1.T_INCIDENCE
oe2.T_INCIDENCE = 90.0 - grazing_angles_mrad[1]*1e-3*180.0/numpy.pi
oe2.T_REFLECTION = oe2.T_INCIDENCE
# orientation
oe1.ALPHA = mirror_orientation_angle # first VFM
oe2.ALPHA = 90.0 # second HFM
# distances
oe1.T_SOURCE = p0 - 0.5*separation
oe1.T_IMAGE = 0.5*separation
oe2.T_SOURCE = 0.5*separation
oe2.T_IMAGE = q0 - 0.5*separation
# mirror shape
oe1.FMIRR = shape[0] #1=spherical, 2=elliptical
oe2.FMIRR = shape[1]
oe1.FCYL = 1
oe2.FCYL = 1
oe1.CIL_ANG = 0
oe2.CIL_ANG = 0
# auto focusing
#focal positions from center of kb
if focal_positions[0] != 0:
pfocal = focal_positions[0]
else:
pfocal = p0
if focal_positions[1] != 0:
qfocal = focal_positions[1]
else:
qfocal = q0
oe1.F_EXT = 0 # internal
oe1.F_DEFAULT = 0
oe1.SSOUR = pfocal - 0.5*separation
oe1.SIMAG = 0.5*separation + qfocal
oe1.THETA = oe1.T_INCIDENCE
oe2.F_EXT = 0 # focii coincident=no
oe2.F_DEFAULT = 0
oe2.SSOUR = pfocal + 0.5*separation
oe2.SIMAG = qfocal - 0.5*separation
oe2.THETA = oe2.T_INCIDENCE
oe1.F_REFLEC = 0 # 0=skip reflectivity, 1=Full dependence
oe2.F_REFLEC = 0
oe1.F_REFL = 0 #prerefl
oe2.F_REFL = 0
oe1.FILE_REFL = ''.encode('utf-8')
oe2.FILE_REFL = ''.encode('utf-8')
if dimensions1 != [0,0]:
oe1.FHIT_C = 1 # mirror dimensions finite: yes (1), no(0).
oe1.FSHAPE = 1 # rectangle
oe1.RWIDX1 = 0.5 * dimensions1[0]
oe1.RWIDX2 = 0.5 * dimensions1[0]
oe1.RLEN1 = 0.5 * dimensions1[1]
oe1.RLEN2 = 0.5 * dimensions1[1]
else:
oe1.FHIT_C = 0 # mirror dimensions finite: yes (1), no(0).
if dimensions2 != [0,0]:
oe2.FHIT_C = 1 # mirror dimensions finite: yes (1), no(0).
oe2.FSHAPE = 1 # rectangle
oe2.RWIDX1 = 0.5 * dimensions2[0]
oe2.RWIDX2 = 0.5 * dimensions2[0]
oe2.RLEN1 = 0.5 * dimensions2[1]
oe2.RLEN2 = 0.5 * dimensions2[1]
else:
oe2.FHIT_C = 0 # mirror dimensions finite: yes (1), no(0).
#
# reflectivity
#
if reflectivity_kind[0] == 0: # ideal
oe1.F_REFLEC = 0
if reflectivity_kind[0] == 1: # mirror
oe1.F_REFLEC = 1
oe1.F_REFL = 0 # prerefl mirror
oe1.FILE_REFL = reflectivity_files[0].encode('utf-8')
if reflectivity_kind[0] == 2: # multilayer
oe1.F_REFLEC = 1
oe1.F_REFL = 2 # multilayer
oe1.FILE_REFL = reflectivity_files[0].encode('utf-8')
if reflectivity_kind[1] == 0: # ideal
oe2.F_REFLEC = 0
if reflectivity_kind[1] == 1: # mirror
oe2.F_REFLEC = 1
oe2.F_REFL = 0 # prerefl mirror
oe2.FILE_REFL = reflectivity_files[1].encode('utf-8')
if reflectivity_kind[1] == 2: # multilayer
oe2.F_REFLEC = 1
oe2.F_REFL = 2 # multilayer
oe2.FILE_REFL = reflectivity_files[1].encode('utf-8')
#
#surface errors
#
if surface_error_files[0] != "":
oe1.F_RIPPLE = 1
oe1.FILE_RIP = surface_error_files[0].encode('utf-8')
oe1.F_G_S = 2
else:
oe1.F_RIPPLE = 0
if surface_error_files[1] != "":
oe2.F_RIPPLE = 1
oe2.FILE_RIP = surface_error_files[1].encode('utf-8')
oe2.F_G_S = 2
else:
oe2.F_RIPPLE = 0
# write no output files. If wanted they are written by python in traceCompoundOE
oe1.FWRITE = 3
oe2.FWRITE = 3
self.append(oe1)
self.append(oe2)
return self
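# Illustrative sketch: an elliptical KB pair centred 50 cm apart, 3 mrad grazing
# incidence on both mirrors, ideal reflectivity. p0/q0 are measured to/from the
# centre of the KB; all values are placeholders.
#
#   coe = CompoundOE(name="kb example")
#   coe.append_kb(3000.0, 150.0, grazing_angles_mrad=[3.0, 3.0], separation=50.0,
#                 shape=[2, 2], dimensions1=[6.0, 20.0], dimensions2=[6.0, 30.0],
#                 reflectivity_kind=[0, 0])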
def append_monochromator_double_crystal(self, p0, q0, photon_energy_ev=14000,separation=0.0,\
dimensions1=[0,0],dimensions2=[0,0],\
reflectivity_file=""):
"""
Appends a double crystal monochromator (with plane crystals)
:param p0: distance from previous source plane (continuation plane) to center of first mirror
:param q0: distance from center of second mirror to image plane (continuation plane)
:param photon_energy_ev: photon energy in eV to set the monochromator
:param separation: separation between the crystals (Default=0). The continuation plane is set in the middle.
:param dimensions1: the dimensions [width,length] for the first mirror. Default: [0,0] meaning infinite dimensions.
:param dimensions2: the dimensions [width,length] for the second mirror. Default: [0,0] meaning infinite dimensions.
:param reflectivity_file: the reflectivity file as created by the bragg preprocessor
:return: self
"""
oe1 = OE()
oe2 = OE()
# #incidence angles
# oe1.T_INCIDENCE = 90.0 - grazing_angles_mrad[0]*1e-3*180.0/numpy.pi
# oe1.T_REFLECTION = oe1.T_INCIDENCE
# oe2.T_INCIDENCE = 90.0 - grazing_angles_mrad[1]*1e-3*180.0/numpy.pi
# oe2.T_REFLECTION = oe2.T_INCIDENCE
oe1.F_CRYSTAL = 1
oe2.F_CRYSTAL = 1
# orientation
oe1.ALPHA = 0.0 # first VFM
oe2.ALPHA = 180.0 # second HFM
#
# distances
oe1.T_SOURCE = p0
oe1.T_IMAGE = 0.5*separation
oe2.T_SOURCE = 0.5*separation
oe2.T_IMAGE = q0
#
# crystal shape 5 (plane)
oe1.FMIRR = 5
oe2.FMIRR = 5
oe1.F_CENTRAL = 1
oe2.F_CENTRAL = 1
oe1.F_PHOT_CENT = 0 # eV
oe2.F_PHOT_CENT = 0 # eV
oe1.PHOT_CENT = photon_energy_ev
oe2.PHOT_CENT = photon_energy_ev
oe1.FILE_REFL = reflectivity_file.encode('utf-8')
oe2.FILE_REFL = reflectivity_file.encode('utf-8')
#
#
if dimensions1 != [0,0]:
oe1.FHIT_C = 1 # mirror dimensions finite: yes (1), no(0).
oe1.FSHAPE = 1 # rectangle
oe1.RWIDX1 = 0.5 * dimensions1[0]
oe1.RWIDX2 = 0.5 * dimensions1[0]
oe1.RLEN1 = 0.5 * dimensions1[1]
oe1.RLEN2 = 0.5 * dimensions1[1]
else:
oe1.FHIT_C = 0 # mirror dimensions finite: yes (1), no(0).
if dimensions2 != [0,0]:
oe2.FHIT_C = 1 # mirror dimensions finite: yes (1), no(0).
oe2.FSHAPE = 1 # rectangle
oe2.RWIDX1 = 0.5 * dimensions2[0]
oe2.RWIDX2 = 0.5 * dimensions2[0]
oe2.RLEN1 = 0.5 * dimensions2[1]
oe2.RLEN2 = 0.5 * dimensions2[1]
else:
oe2.FHIT_C = 0 # mirror dimensions finite: yes (1), no(0).
# write no output files. If wanted they are written by python in traceCompoundOE
oe1.FWRITE = 3
oe2.FWRITE = 3
self.append(oe1)
self.append(oe2)
return self
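# Illustrative sketch: a double crystal monochromator tuned to 12.4 keV; the
# bragg preprocessor file name "Si111.dat" is an assumption, not a file shipped
# with the library.
#
#   coe = CompoundOE(name="dcm example")
#   coe.append_monochromator_double_crystal(3000.0, 1000.0,
#                                           photon_energy_ev=12400.0,
#                                           separation=1.0,
#                                           reflectivity_file="Si111.dat")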
def dump_start_files(self,offset=0):
for i,oe in enumerate(self.list):
oe.write('start.%02d'%(i+1+offset))
print('File written to disk: start.%02d\n'%(i+1+offset))
def dump_systemfile(self,offset=0):
f = open('systemfile.dat','w')
for i,oe in enumerate(self.list):
f.write('start.%02d\n' %(i+1+offset))
f.close()
print('File written to disk: systemfile.dat')
def length(self):
length = 0.0
for i,oe in enumerate(self.list):
length += oe.T_SOURCE + oe.T_IMAGE
return length
class Source(ShadowLib.Source):
def __init__(self):
ShadowLib.Source.__init__(self)
def to_dictionary(self):
"""
returns a python dictionary of the Shadow.Source instance
:return: a dictionary
"""
mem = inspect.getmembers(self)
mydict = {}
for i,var in enumerate(mem):
if var[0].isupper():
mydict[var[0]]= var[1]
return(mydict)
def duplicate(self):
"""
makes a copy of the source
:return: new instance of Shadow.Source()
"""
src_new = Source()
mem = inspect.getmembers(self)
for i,var in enumerate(mem):
if var[0].isupper():
setattr(src_new,var[0],var[1])
return(src_new)
#Gaussian source
def set_divergence_gauss(self, sigmaxp, sigmazp):
"""
sets Gaussian source in divergence space
:param sigmaxp: SIGDIX for SHADOW
:param sigmazp: SIGDIZ for SHADOW
:return: self
"""
self.FDISTR = 3
self.HDIV1 = 1.0
self.HDIV2 = 1.0
self.VDIV1 = 1.0
self.VDIV2 = 1.0
self.SIGDIX = sigmaxp
self.SIGDIZ = sigmazp
return self
def set_spatial_gauss(self,sigmax, sigmaz):
"""
sets Gaussian source in real space
:param sigmax: SIGMAX for SHADOW.
:param sigmaz: SIGMAZ for SHADOW.
:return: self
"""
self.FSOUR = 3
self.SIGMAX = sigmax
self.SIGMAZ = sigmaz
return self
def set_gauss(self,sigmax,sigmaz,sigmaxp,sigmazp):
"""
Sets a Gaussian source in both real and divergence spaces
:param sigmax: SIGMAX for SHADOW.
:param sigmaz: SIGMAZ for SHADOW.
:param sigmaxp: SIGDIX for SHADOW.
:param sigmazp: SIGDIZ for SHADOW.
:return: self
"""
self.set_divergence_gauss(sigmaxp,sigmazp)
self.set_spatial_gauss(sigmax,sigmaz)
return self
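# Illustrative sketch: a Gaussian source with 100 um x 10 um RMS size and
# 10 urad x 1 urad RMS divergence, assuming cm as the user unit.
#
#   src = Source()
#   src.set_gauss(100e-4, 10e-4, 10e-6, 1e-6)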
def set_energy_monochromatic(self,emin):
"""
Sets a single energy line for the source (monochromatic)
:param emin: the energy in eV
:return: self
"""
self.F_COLOR = 1
self.F_PHOT = 0 #eV
self.PH1 = emin
return self
def set_energy_box(self,emin,emax):
"""
Sets a box energy distribution for the source (monochromatic)
:param emin: minimum energy in eV
:param emax: maximum energy in eV
:return: self
"""
self.F_COLOR = 3
self.F_PHOT = 0 #eV
self.PH1 = emin
self.PH2 = emax
return self
def set_pencil(self):
"""
Sets a pencil beam (zero size, zero divergence)
:return:
"""
self.FSOUR = 0
self.FDISTR = 1
self.HDIV1 = 0.0
self.HDIV2 = 0.0
self.VDIV1 = 0.0
self.VDIV2 = 0.0
return self
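# Illustrative sketch: the energy setters can be chained with the geometry
# setters because all of them return self.
#
#   src = Source().set_pencil().set_energy_monochromatic(14000.0)
#   # or a 1 keV wide box around 14 keV:
#   # src.set_energy_box(13500.0, 14500.0)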
def apply_gaussian_undulator(self, undulator_length_in_m=1.0,user_unit_to_m=1e2, verbose=1, und_e0=None):
"""
Convolves the already defined Gaussian source (for the electrons) with the photon emission
for an undulator.
:param undulator_length_in_m:
:param user_unit_to_m:
:param verbose: set to 0 for silent output
:param und_e0: the setting photon energy in eV, if undefined (None) reads from SHADOW PH1 variable
:return: self
"""
#user_unit_to_m = 1e-2
codata_c = numpy.array(299792458.0)
codata_h = numpy.array(6.62606957e-34)
codata_ec = numpy.array(1.602176565e-19)
m2ev = codata_c*codata_h/codata_ec
if und_e0 == None:
if self.F_COLOR == 3: # box
und_e0 = 0.5*(self.PH1+self.PH2)
else:
und_e0 = self.PH1
lambda1 = m2ev/und_e0
if verbose:
print("---adding undulator radiation in Gaussian approximation:")
print('')
print(" photon energy [eV]: %f "%(und_e0))
print(" photon wavelength [A]: %f "%(lambda1*1e10))
# calculate sizes of the photon undulator beam
# see formulas 25 & 30 in Elleaume (Onuki & Elleaume)
s_phot = 2.740/(4e0*numpy.pi)*numpy.sqrt(undulator_length_in_m*lambda1)
sp_phot = 0.69*numpy.sqrt(lambda1/undulator_length_in_m)
if verbose:
print('')
print(' RMS electron size H/V [um]: '+
repr(self.SIGMAX*1e6*user_unit_to_m)+ ' / '+
repr(self.SIGMAZ*1e6*user_unit_to_m) )
print(' RMS electron divergence H/V [urad]: '+
repr(self.SIGDIX*1e6)+ ' / '+
repr(self.SIGDIZ*1e6) )
print('')
print(' RMS radiation size [um]: '+repr(s_phot*1e6))
print(' RMS radiation divergence [urad]: '+repr(sp_phot*1e6))
print('')
print(' Photon beam (convolution): ')
photon_h = numpy.sqrt( self.SIGMAX**2 + (s_phot/user_unit_to_m)**2)
photon_v = numpy.sqrt( self.SIGMAZ**2 + (s_phot/user_unit_to_m)**2)
photon_hp = numpy.sqrt(self.SIGDIX**2 + sp_phot**2 )
photon_vp = numpy.sqrt(self.SIGDIZ**2 + sp_phot**2 )
if verbose:
print(' RMS size H/V [um]: '+ repr(photon_h*1e6*user_unit_to_m) + ' / '+repr(photon_v*1e6*user_unit_to_m))
print(' RMS divergence H/V [urad]: '+ repr(photon_hp*1e6) + ' / '+repr(photon_vp*1e6))
self.SIGMAX = photon_h
self.SIGMAZ = photon_v
self.SIGDIX = photon_hp
self.SIGDIZ = photon_vp
return self
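# Illustrative sketch of what apply_gaussian_undulator() does: the photon beam
# is the quadratic sum (convolution of Gaussians) of the electron beam and the
# single-electron undulator emission,
#   sigma_photon  = sqrt(sigma_e**2  + s_phot**2)
#   sigma_photon' = sqrt(sigma_e'**2 + sp_phot**2)
# with s_phot and sp_phot computed from the undulator length and wavelength as
# in the code above. Values below are placeholders (cm/rad user units).
#
#   src.set_gauss(4e-4, 1e-4, 10e-6, 5e-6)
#   src.set_energy_monochromatic(14000.0)
#   src.apply_gaussian_undulator(undulator_length_in_m=2.0, user_unit_to_m=1e-2)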
def sourcinfo(self,title=None):
'''
mimics SHADOW sourcinfo postprocessor. Returns a text string.
:return: a text string
'''
txt = ''
TOPLIN = '+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n'
TSPATIAL = {}
TDEPTH = {}
TANG = {}
TPHOT = {}
TPOL = {}
TSPATIAL['1'] = 'POINT '
TSPATIAL['4'] = 'GAUSSIAN '
TSPATIAL['2'] = 'RECTANGULAR '
TSPATIAL['3'] = 'ELLIPTICAL '
TSPATIAL['5'] = 'PHASE SPACE ELLIPSE'
TDEPTH['1'] = 'DEPTH OFF '
TDEPTH['2'] = 'DEPTH ON '
TDEPTH['3'] = 'RECTANGULAR '
TDEPTH['4'] = 'GAUSSIAN '
TDEPTH['5'] = 'SYNCHROTRON '
TANG['1'] = 'UNIFORM '
TANG['2'] = 'LAMBERTIAN '
TANG['3'] = 'GAUSSIAN '
TANG['4'] = 'SYNCHROTRON '
TANG['5'] = 'CONICAL '
TANG['6'] = 'SYNCHROTRON (exact)'
TANG['7'] = 'PHASE SPACE ELLIPSE'
TPHOT['1'] = 'PHOTON OFF '
TPHOT['2'] = 'PHOTON ON '
TPHOT['3'] = 'SINGLE LINE '
TPHOT['4'] = 'MULTI LINE '
TPHOT['5'] = 'BOX DISTR. '
TPOL['1'] = 'SR PARALLEL '
TPOL['2'] = 'SR PERPEND '
TPOL['3'] = 'SR TOTAL '
txt += TOPLIN
txt += '************** S O U R C E D E S C R I P T I O N **************\n'
if title == None:
txt += '\n\n'
else:
txt += title+'\n'
txt += TOPLIN
# !C
# !C Type of computation
# !C
if self.FGRID == 0:
txt += 'Random Source\n'
if self.FGRID == 1:
txt += 'Grid Source\n'
if self.FGRID == 2:
txt += 'Mixed Type Source. Spatial: GRID , directions: RANDOM\n'
if self.FGRID == 3:
txt += 'Mixed Type Source. Spatial: RANDOM, directions: GRID\n'
if self.FGRID == 4:
txt += 'Phase space ellipses. RANDOM around each ellipse.\n'
if self.FGRID == 5:
txt += 'Phase space ellipses. GRID around each ellipse.\n'
txt += 'Generated total %d rays.\n'%(self.NPOINT)
# !C
# !C Spatial type and values
# !C
if self.FSOURCE_DEPTH == 1:
txt += 'Source assumed BIDIMENSIONAL (flat).\n'
else:
txt += 'Source assumed TRIDIMENSIONAL.\n'
txt += 'Source Spatial Characteristics: '+TSPATIAL[str(1+self.FSOUR)]
txt += '\n'
if ((self.FSOUR == 1) or (self.FSOUR == 2)):
txt += 'Source width: %17.9g and Height: %17.9g'%(self.WXSOU,self.WZSOU)
if ((self.FSOUR == 3) or (self.FSOUR == 4)):
txt += 'Sigma X: %17.9g and Sigma Z: %17.9g'%(self.SIGMAX,self.SIGMAZ)
txt += '\n'
if self.FSOURCE_DEPTH == 2:
txt += 'Depth: UNIFORM Value : %17.9g\n'%(self.WYSOU)
if self.FSOURCE_DEPTH == 3:
txt += 'Depth: GAUSSIAN. Sigma-y : %17.9g\n'%(self.SIGMAY)
if self.FSOURCE_DEPTH == 4:
txt += 'Depth: SYNCHROTRON SOURCE.\n'
# !C
# !C Source Emission
# !C
txt += TOPLIN
txt += 'Source Emission Characteristics\n'
txt += 'Distribution Type: %s \n'%(TANG[str(self.FDISTR)])
if self.FDISTR != 5:
txt += 'Distribution Limits. +X : %17.9g -X: %17.9g rad\n'%(self.HDIV1,self.HDIV2)
txt += ' +Z : %17.9g -Z: %17.9g rad\n'%(self.VDIV1,self.VDIV2)
if ((self.FDISTR == 3) or (self.FDISTR == 7)):
txt += 'Horiz. StDev : %17.9g\n'%(self.SIGDIX)
txt += 'Verti. StDev : %17.9g\n'%(self.SIGDIZ)
if self.FDISTR == 5:
txt += 'Cone Outer Aperture : %17.9g Inner Aperture : %17.9g \n'%(self.CONE_MAX,self.CONE_MIN)
# !C
# !C Synchrotron Case
# !C
if ((self.FDISTR == 4) or (self.FDISTR == 6)):
txt += 'Magnetic Radius = %17.9g m. Beam Energy = %17.9g GeV.\n'%(self.R_MAGNET,self.BENER)
txt += 'Beam Emittancies. EPSI_X: %17.9g EPSI_Z: %17.9g \n'%(self.EPSI_X,self.EPSI_Z)
txt += 'Distance from Waist. X: %17.9g Z: %17.9g \n'%(self.EPSI_DX,self.EPSI_DZ)
txt += 'Polarization Used: %s \n'%(TPOL[str(self.F_POL)])
# !C
# !C Photon Energy
# !C
txt += TOPLIN
if self.F_COLOR != 0:
photonArray = {}
WAVE = {}
photonArray['1'] = self.PH1
photonArray['2'] = self.PH2
photonArray['3'] = self.PH3
photonArray['4'] = self.PH4
photonArray['5'] = self.PH5
photonArray['6'] = self.PH6
photonArray['7'] = self.PH7
photonArray['8'] = self.PH8
photonArray['9'] = self.PH9
photonArray['10'] = self.PH10
WAVE['1'] = self.PH1
WAVE['2'] = self.PH2
WAVE['3'] = self.PH3
WAVE['4'] = self.PH4
WAVE['5'] = self.PH5
WAVE['6'] = self.PH6
WAVE['7'] = self.PH7
WAVE['8'] = self.PH8
WAVE['9'] = self.PH9
WAVE['10'] = self.PH10
txt += 'Source Photon Energy Distribution: %s \n'%(TPHOT[str(2+self.F_COLOR)])
codata_h = numpy.array(6.62606957e-34)
codata_ec = numpy.array(1.602176565e-19)
codata_c = numpy.array(299792458.0)
TOANGS = codata_h*codata_c/codata_ec*1e10
if self.F_COLOR <=2:
if self.F_COLOR == 1:
if (self.F_PHOT == 0): WAVE['1'] = TOANGS/photonArray['1']
if (self.F_PHOT == 1): photonArray['1'] = TOANGS/WAVE['1']
txt += 'Photon Energy: %12.5g eV, or %12.5g Angs. \n'%(photonArray['1'],WAVE['1'])
else:
for J in range(1,1+self.N_COLOR):
if (self.F_PHOT == 0): WAVE[str(J)] = TOANGS/photonArray[str(J)]
if (self.F_PHOT == 1): photonArray[str(J)] = TOANGS/WAVE[str(J)]
txt += 'Photon Energy: %12.5g eV, or %12.5g Angs.'%(photonArray[str(J)],WAVE[str(J)])
else:
if (self.F_PHOT == 0): WAVE['1'] = TOANGS/photonArray['1']
if (self.F_PHOT == 1): photonArray['1'] = TOANGS/WAVE['1']
if (self.F_PHOT == 0): WAVE['2'] = TOANGS/photonArray['2']
if (self.F_PHOT == 1): photonArray['2'] = TOANGS/WAVE['2']
txt += 'From Photon Energy: %17.9g eV or %17.9g Angs.\n'%(photonArray['1'],WAVE['1'])
txt += ' to Photon Energy: %17.9g eV or %17.9g Angs.\n'%(photonArray['2'],WAVE['2'])
if self.F_POLAR:
txt += 'Angular difference in phase is %12.5g \n'%(self.POL_ANGLE*180.0/numpy.pi)
txt += 'Degree of polarization is %12.5g \n'%(self.POL_DEG)
if self.F_COHER == 0:
txt += 'Source points have INCOHERENT phase.\n'
else:
txt += 'Source points have COHERENT phase.\n'
#
# optimization
#
if self.F_BOUND_SOUR > 0:
txt += 'Source optimization (rejection, variance reduction) used: \n'
txt += ' total number of rays created: %d \n'%(self.NTOTALPOINT)
txt += ' accepted rays (stored): %d \n'%self.NPOINT
txt += ' rejected: %d \n'%(self.NTOTALPOINT-self.NPOINT)
txt += ' created/accepted ratio: %d \n'%(float(self.NTOTALPOINT)/float(self.NPOINT))
if self.F_BOUND_SOUR == 1:
txt += ' file with phase-space volume: '+self.FILE_BOUND.strip().decode()
else:
txt += ' file with slit/acceptance: '+self.FILE_BOUND.strip().decode()
txt += TOPLIN
txt += '*************** E N D ***************\n'
txt += TOPLIN
return (txt)
if __name__ == '__main__':
#
# test
#
do_test = 0 # 0=None, 1=only source ; 2= source and trace ; 3=undulator_gaussian ; 4 lens, like in lens_single_plot.ws
# 6=ID30B # 7=ID23-2
if ((do_test == 1) or (do_test == 2)):
src = Source()
#set Gaussian source
sh, sv, shp, svp = 100e-4, 10e-4, 10e-6, 1e-6
src.set_gauss(sh, sv, shp, svp)
src.write('start.00')
#run shadow source
beam = Beam()
beam.genSource(src)
beam.write('begin.dat')
src.write('end.00')
#analyze source results
print('Intensity source, all, good and lost rays: %f, %f, %f , '%\
(beam.intensity(nolost=0),beam.intensity(nolost=1),beam.intensity(nolost=2) ))
#print( src.to_dictionary() )
print(src.sourcinfo(title='sourcinfo in python'))
#4 histograms
ticket_h = beam.histo1(col=1, nbins = 500, nolost=1, write='HISTO1', xrange=None , ref=1)
print('Histogram FWHM: %f, stdev: %f, initial: %f\n: '%(ticket_h['fwhm'],ticket_h['fwhm']/2.35,sh))
ticket_h = beam.histo1(col=3, nbins = 500, nolost=1, write='HISTO1', xrange=None , ref=1)
print('Histogram FWHM: %f, stdev: %f, initial: %f\n: '%(ticket_h['fwhm'],ticket_h['fwhm']/2.35,sv))
ticket_h = beam.histo1(col=4, nbins = 500, nolost=1, write='HISTO1', xrange=None , ref=1)
print('Histogram FWHM: %f, stdev: %f, initial: %f\n: '%(ticket_h['fwhm'],ticket_h['fwhm']/2.35,shp))
ticket_h = beam.histo1(col=6, nbins = 500, nolost=1, write='HISTO1', xrange=None , ref=1)
print('Histogram FWHM: %f, stdev: %f, initial: %f\n: '%(ticket_h['fwhm'],ticket_h['fwhm']/2.35,svp))
if do_test == 2:
oe1 = OE()
oe1.write('start.01')
#oe1.load('tmp/start.01')
beam.traceOE(oe1,1)
oe1.write('end.01')
beam.write('star.01')
#analysis
#print(beam.getshonecol(11,nolost=1))
print('Intensity after oe 1 all, good and lost rays: %f, %f, %f , '%\
(beam.intensity(nolost=0),beam.intensity(nolost=1),beam.intensity(nolost=2) ))
#print( oe1.to_dictionary() )
print(oe1.mirinfo(title='mirinfo in python'))
ticket = beam.histo1(col=3, nbins =11, nolost=1, write='HISTO1', xrange=[-0.055, 0.055], ref=1)
if ticket['error'] == 0:
beam.write('star.01')
bins = ticket['bin_left']
bins_c = ticket['bin_center']
h = ticket['histogram']
print('shape, bins: ',bins.shape)
print('shape, histogram: ',h.shape)
for i,hi in enumerate(h):
print(i,bins_c[i],bins[i], hi)
else:
print('Error in histogram calculations')
if do_test == 3:
# example ESRF ID30B, data in m,rad
emittH = 3.9e-9
emittV = 10e-12
betaH = 35.6
betaV = 3.0
sigmaH = numpy.sqrt(emittH/betaH)
sigmaV = numpy.sqrt(emittV/betaV)
sigmaHp = emittH/sigmaH
sigmaVp = emittV/sigmaV
src = Source()
src.set_gauss(sigmaH*1e2, sigmaV*1e2, sigmaHp, sigmaVp) #cm,rad
src.set_energy_monochromatic(14000.0)
print("BEFORE sH: %f um,sV: %f um, sHp: %f urad, sVp: %f urad"%\
(src.SIGMAX*1e4,src.SIGMAZ*1e4,src.SIGDIX*1e6,src.SIGDIZ*1e6))
src.apply_gaussian_undulator(undulator_length_in_m=2.8,\
user_unit_to_m=1e-2,verbose=1)
print("AFTER sH: %f um,sV: %f um, sHp: %f urad, sVp: %f urad"%\
(src.SIGMAX*1e4,src.SIGMAZ*1e4,src.SIGDIX*1e6,src.SIGDIZ*1e6))
src.write('start.00')
print("File written to disk: start.00")
# create source
beam = Beam()
beam.genSource(src)
beam.write("beginG.dat")
print("File written to disk: beginG.dat")
src.write('end.00')
print("File written to disk: end.00")
if do_test == 4:
print("setting lens system like Example: lens_single_sysplot.ws")
src = Source()
src.set_energy_monochromatic(4600)
src.set_gauss(0.2,0.2,1e-6,1e-6)
src.NPOINT = 5000
src.ISTAR1 = 677543155
src.write("start.00")
# create source
beam = Beam()
beam.genSource(src)
src.write("end.00")
beam.write("begin.dat")
lens = CompoundOE()
lens.append_lens(1000.0,1000.0,surface_shape=1,convex_to_the_beam=1,diameter=None,cylinder_angle=None,\
radius=1000.0,interthickness=5.0,\
refraction_index=1.5,attenuation_coefficient=0.0, \
use_ccc=0)
#lens.dump_start_files()--
listEnd = beam.traceCompoundOE(lens,write_start_files=1,write_end_files=1,write_star_files=1)
lens.info()
print(lens.mirinfo())
if do_test == 5:
print("setting CRL system like Example: crl_snigirev1996.ws")
src = Source()
src.set_energy_monochromatic(14000)
src.set_spatial_gauss(0.00638,0.00638)
#conical
src.FDISTR = 5
src.CONE_MIN = 0.0
src.CONE_MAX = 10e-6
src.NPOINT = 5000
src.ISTAR1 = 677543155
src.write("start.00")
# create source
beam = Beam()
beam.genSource(src)
beam.write("begin.dat")
src.write("end.00")
# crl parameters
crl_nlenses = 30 # number of lenses
crl_shape = 1 #1=Sphere 4=Paraboloid 5=Plane
crl_cylinder = 0.0 #None: no cylindrical, 0=meridional, 1=sagittal :
crl_r = 300e-4 #radius (at tip for parabolas) or major axis for ell/hyp :
crl_diameter = None # 600e-4 #lens physical aperture
crl_interthickness = 25e-4 #thickness between two interfaces (in material)
crl_thickness = 625e-4 #total thickness of a single lens
crl_fs_before = 3000.0 #free space before the first lens
crl_fs_after = 189.87 #free space after the last lens
# shadow3> prerefl_test
# prerefl_test: calculates refraction index for a given energy
# using a file created by the prerefl preprocessor.
#
# File Name (from prerefl): Al5_55.dat
# Photon energy [eV]: 14000
# ------------------------------------------------------------------------
# Inputs:
# prerefl file: Al5_55.dat gives for E= 14000.000000000000 eV:
# energy [eV]: 14000.000000000000
# wavelength [A]: 0.88560137800029992
# wavenumber (2 pi/lambda) [cm^-1]: 709482351.55332136
# Outputs:
# refraction index = (1-delta) + i*beta :
# delta: 2.7710264971503307E-006
# beta: 1.7175194768200010E-008
# real(n): 0.99999722897350285
# attenuation coef [cm^-1]: 24.370995145057691
# ------------------------------------------------------------------------
crl_file = "" # "Al5_55.dat" #material file (from prerefl preprocessor)
refraction_index = 0.99999722897350285
attenuation_coefficient = 24.370995145057691
# initialize compound oe
crl = CompoundOE(name = 'crl_snigirev1996')
# method 0: manual loop, 1: use append_crl
method = 1
if method == 0:
p0 = crl_fs_before
q0 = crl_fs_after
p_or_q = 0.5*(crl_thickness - crl_interthickness)
for i in range(crl_nlenses):
pi = p_or_q
qi = p_or_q
if i == 0:
pi = crl_fs_before
if i == crl_nlenses-1:
qi = crl_fs_after
crl.append_lens(pi,qi,surface_shape=crl_shape,\
convex_to_the_beam=0,diameter=crl_diameter,\
prerefl_file=crl_file, \
refraction_index=refraction_index, attenuation_coefficient=attenuation_coefficient,\
cylinder_angle=crl_cylinder,radius=crl_r,interthickness=crl_interthickness,\
use_ccc=1)
else:
crl.append_crl(crl_fs_before, crl_fs_after, nlenses=crl_nlenses, surface_shape=crl_shape, \
convex_to_the_beam=0,diameter=crl_diameter,\
prerefl_file=crl_file,\
refraction_index=refraction_index, attenuation_coefficient=attenuation_coefficient, \
cylinder_angle=crl_cylinder,radius=crl_r,interthickness=crl_interthickness,\
use_ccc=1)
# trace system
beam.traceCompoundOE(crl,\
write_start_files=0,write_end_files=0,write_star_files=0)
#write only last result file
beam.write("star.60")
print("\nFile written to disk: star.60")
print("\nNumber of interfaces: %d"%(crl.number_oe()))
#crl.dump_systemfile() # lens.info()
#print(crl.mirinfo())
if do_test == 6:
print("setting Transfocator for ID30B")
#
# Gaussian undulator source
#
#ID30 TDR data, pag 10, in m
emittH = 3.9e-9
emittV = 10e-12
betaH = 35.6
betaV = 3.0
sigmaXp = numpy.sqrt(emittH/betaH)
sigmaZp = numpy.sqrt(emittV/betaV)
sigmaX = emittH/sigmaXp
sigmaZ = emittV/sigmaZp
print("\n\nElectron sizes H:%f um, V:%fu m;\nelectron divergences: H:%f urad, V:%f urad"%\
(sigmaX*1e6, sigmaZ*1e6, sigmaXp*1e6, sigmaZp*1e6))
# set Gaussian undulator source at 14 keV
src = Source()
photon_energy_ev = 14000
src.set_energy_monochromatic(photon_energy_ev)
src.set_gauss(sigmaX*1e2,sigmaZ*1e2,sigmaXp,sigmaZp)
print("\n\nElectron sizes stored H:%f um, V:%f um;\nelectron divergences: H:%f urad, V:%f urad"%\
(src.SIGMAX*1e4,src.SIGMAZ*1e4,src.SIGDIX*1e6,src.SIGDIZ*1e6))
src.apply_gaussian_undulator(undulator_length_in_m=2.8, user_unit_to_m=1e-2, verbose=1)
print("\n\nElectron sizes stored (undulator) H:%f um, V:%f um;\nelectron divergences: H:%f urad, V:%f urad"%\
(src.SIGMAX*1e4,src.SIGMAZ*1e4,src.SIGDIX*1e6,src.SIGDIZ*1e6))
src.NPOINT = 5000
src.ISTAR1 = 677543155
src.write("start.00")
# create source
beam = Beam()
beam.genSource(src)
beam.write("begin.dat")
src.write("end.00")
#
# transfocator id30B
#
#set transfocator units in cm ================================================================================
# geometry of the TF
tf_slots = [ 1, 2, 4, 8, 1, 2, 1] # slots
tf_on_off = [ 1, 1, 1, 1, 1, 1, 1] # set (1) or unset (0)
nslots = len(tf_slots)
tf_lens_thickness = [0.3 for i in range(nslots)] #total thickness of a single lens in cm
# for each slot, positional gap of the first lens in cm
tf_step = [ 4, 4, 1.9, 6.1, 4, 4, tf_lens_thickness[-1]]
tf_radii = [.05, .05, .05, .05, 0.1, 0.1, 0.15] # radii of the lenses in cm
# File Name (from prerefl): Be5_55.dat
# Photon energy [eV]: 14000
# ------------------------------------------------------------------------
# Inputs:
# prerefl file: Be5_55.dat gives for E= 14000.000000000000 eV:
# energy [eV]: 14000.000000000000
# wavelength [A]: 0.88560137800029992
# wavenumber (2 pi/lambda) [cm^-1]: 709482351.55332136
# Outputs:
# refraction index = (1-delta) + i*beta :
# delta: 1.7354949043424384E-006
# beta: 4.4123016940187606E-010
# real(n): 0.99999826450509566
# attenuation coef [cm^-1]: 0.62609003632702676
# ------------------------------------------------------------------------
refraction_index = 0.99999826450509566
attenuation_coefficient = 0.626090036
# position of the TF measured from the center of the transfocator
tf_p = 5960
tf_q = 1000 # 9760 - tf_p
#calculated values
# these are distances p and q with TF length removed
tf_length = numpy.array(tf_step).sum() #tf length in cm
tf_fs_before = tf_p - 0.5*tf_length #free space from the source to the transfocator entrance
tf_fs_after = tf_q - 0.5*tf_length # free space from the transfocator exit to the image plane
# for each slot, these are the empty distances before and after the lenses
tf_p0 = numpy.zeros(nslots)
tf_q0 = numpy.array(tf_step) - (numpy.array(tf_slots) * tf_lens_thickness)
# add now the p q distances
tf_p0[0] += tf_fs_before
tf_q0[-1] += tf_fs_after
nlenses = numpy.array(tf_slots)*numpy.array(tf_on_off)
slots_empty = (numpy.array(tf_slots)-nlenses)
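# Added note (not in the original example): with the numbers above,
# tf_length = 4 + 4 + 1.9 + 6.1 + 4 + 4 + 0.3 = 24.3 cm, so
# tf_fs_before = 5960 - 12.15 = 5947.85 cm and tf_fs_after = 1000 - 12.15 = 987.85 cm.
# The per-slot empty gaps come out as tf_q0 = [3.7, 3.4, 0.7, 3.7, 3.7, 3.4, 0.0] cm,
# and with every slot switched on the stack holds 1+2+4+8+1+2+1 = 19 lenses.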
# # this is for calculations with xraylib (focal distances)
# xrl_symbol = "Be"
# xrl_density = 1.845
# build transfocator
tf = CompoundOE(name='TF ID30B')
tf.append_transfocator(tf_p0.tolist(), tf_q0.tolist(), nlenses=nlenses, radius=tf_radii,\
slots_empty=0, surface_shape=4, convex_to_the_beam=0, diameter=None,\
#prerefl_file="Be5_55.dat",\
refraction_index=refraction_index,attenuation_coefficient=attenuation_coefficient, \
cylinder_angle=0.0,interthickness=50e-4,thickness=0.3,\
use_ccc=0)
#trace system
tf.dump_systemfile()
beam.traceCompoundOE(tf,\
write_start_files=2,write_end_files=2,write_star_files=2,write_mirr_files=2)
#write only last result file
beam.write("star_tf.dat")
print("\nFile written to disk: star_tf.dat")
print("\nLens stack: ",nlenses," empty slots: ",slots_empty)
print("\nNumber of interfaces: %d"%(tf.number_oe()))
print("\nTotal beamline length (from compound element) %f m"%(1e-2*tf.length()))
print("\nTotal Transfocator length %f m"%(1e-2*tf_length))
print("\nTotal Transfocator length (from compound element): %f cm "%(tf.length()-tf_fs_after-tf_fs_before))
print("\ntf_fs_before: %f m, tf_fs_after: %f m"%(tf_fs_before*1e-2,tf_fs_after*1e-2))
if do_test == 7:
print("setting KB for ID23-2")
# create source
src = Source()
src.set_energy_monochromatic(14200.0)
SIGMAX = 0.00374784
SIGMAZ = 0.000425671
SIGDIX = 0.000107037
SIGDIZ = 5.55325e-06
src.set_gauss(SIGMAX,SIGMAZ,SIGDIX,SIGDIZ)
src.write("start.00")
beam = Beam()
beam.genSource(src)
beam.write("begin.dat")
src.write("end.00")
kb = CompoundOE(name='KB')
kb.append_kb(4275,180,separation=4315-4275,grazing_angles_mrad=[3.9,17.8],shape=[2,2], \
dimensions1=[6,20],dimensions2=[6,30],reflectivity_kind=[0,0],reflectivity_files=["",""],\
surface_error_files=["waviness.dat","waviness.dat"])
kb.info()
#trace
kb.dump_systemfile()
beam.traceCompoundOE(kb,write_start_files=1,write_end_files=1,write_star_files=1)
if do_test == 8:
print("setting double crystal monochromator")
# create source
src = Source()
src.set_energy_box(13990,14010)
SIGMAX = 0.00374784
SIGMAZ = 0.000425671
SIGDIX = 0.000107037
SIGDIZ = 5.55325e-06
src.set_gauss(SIGMAX,SIGMAZ,SIGDIX,SIGDIZ)
src.write("start.00")
beam = Beam()
beam.genSource(src)
beam.write("begin.dat")
src.write("end.00")
dcm = CompoundOE(name='DCM')
dcm.append_monochromator_double_crystal(4275,180,separation=10, photon_energy_ev=14000.0, \
dimensions1=[6,20],dimensions2=[0,0],reflectivity_file="Si5_55.111" )
#trace
dcm.dump_systemfile()
beam.traceCompoundOE(dcm,write_start_files=1,write_end_files=1,write_star_files=1)
if 0: # test duplicate elements
src1 = src.duplicate()
src1.NPOINT=15000
print("\n\n>>> orig NPOINT=%d, copy NPOINT=%d"%(src.NPOINT,src1.NPOINT))
print("\n\n")
oen = OE()
oen.T_IMAGE = 1.00
oen_bis = oen.duplicate()
oen_bis.T_IMAGE = 2.0
print("\n\n>>> orig T_IMAGE=%f, copy T_IMAGE=%f"%(oen.T_IMAGE,oen_bis.T_IMAGE))
if 1: # test plotxy
tkt = beam.histo2(1,3,nbins_h=3,nbins_v=3)
print(tkt)
print("H left",tkt["bin_h_left"])
print("H righ",tkt["bin_h_right"])
print("H cent",tkt["bin_h_center"])
print("H edges",tkt["bin_h_edges"])
print("H shape: ",tkt["histogram"].shape)
|
radiasoft/pypi-shadow3
|
Shadow/ShadowLibExtensions.py
|
Python
|
gpl-3.0
| 124,400
|
[
"CRYSTAL",
"Gaussian"
] |
f5202a922939511fc72c25b849149796ef88226d03e3eede6a43d9d12137537a
|
from Sire.Base import *
from Sire.IO import *
from Sire.Mol import *
from glob import glob
from nose.tools import assert_equal, assert_almost_equal
# Check that we have PDB2 support in this version of Sire.
has_pdb2 = True
try:
p = PDB2()
except:
# No PDB2 support.
has_pdb2 = False
# General test of ability to read and write PDB files.
# All PDB files in the "../io/" directory are parsed.
# Once the input file is parsed we then check that the parser constructs a
# Sire Molecule from the parsed data. Following this, we then check that the
# parser can convert the molecule back into the correct data format, ready to
# be written to file.
def test_read_write(verbose=False):
if not has_pdb2:
return
# Glob all of the PDB files in the example file directory.
pdbfiles = glob('../io/*pdb')
# Loop over all files.
for file in pdbfiles:
# Test in parallel and serial mode.
for use_par in [True, False]:
if verbose:
print("Reading PDB file: %s" % file)
print("Parallel = %s" % use_par)
# Parse the file into a PDB2 object.
# Errors should be thrown if the record data in a file
# doesn't match the PDB format.
p = PDB2(file, {"parallel" : wrap(use_par)})
if verbose:
print("Constructing molecular system...")
# Construct a Sire molecular system.
s = p.toSystem()
if verbose:
print("Reconstructing PDB data from molecular system...")
# Now re-parse the molecular system.
p = PDB2(s, {"parallel" : wrap(use_par)})
if verbose:
print("Passed!\n")
# Specific atom coordinate data validation test for file "../io/ntrc.pdb".
def test_atom_coords(verbose=False):
if not has_pdb2:
return
# Test atoms.
atoms = ["CA", "CB", "N", "O", "HB"]
# Test coordinates.
coords = [[-13.721, -3.484, 14.690],
[-10.695, -0.294, 14.759],
[ -8.536, -2.557, 13.277],
[ -7.037, -1.615, 9.350],
[ -5.045, 2.118, 8.812]]
# Test in parallel and serial mode.
for use_par in [True, False]:
if verbose:
print("Reading PDB file: ../io/ntrc.pdb")
print("Parallel = %s" % use_par)
# Parse the PDB file.
p = PDB2('../io/ntrc.pdb', {"parallel" : wrap(use_par)})
if verbose:
print("Constructing molecular system...")
# Create a molecular system.
s = p.toSystem()
# Get the first molecule.
m = s[MolIdx(0)]
if verbose:
print("Checking atomic coordinates...")
# Loop over all of the atoms.
for i in range(0, len(atoms)):
# Extract the atom from the residue "i + 1".
a = m.atom(AtomName(atoms[i]) + ResNum(i+1))
# Extract the atom coordinates.
c = a.property("coordinates")
# Validate parsed coordinates against known values.
assert_almost_equal( c[0], coords[i][0] )
assert_almost_equal( c[1], coords[i][1] )
assert_almost_equal( c[2], coords[i][2] )
if verbose:
print("Passed!\n")
# Test that elements are inferred correctly when the PDB file is a PSF companion
# used for a NAMD simulation. Here the element data is often missing, and this
# section of the record line is used to label the residue to which each atom
# belongs.
def test_psf_companion(verbose=False):
# Load the PDB file.
p = PDB2('../io/psf_companion.pdb')
# Create the molecular system.
s = p.toSystem()
# Create the list of elements.
elements = [ Element("C"),
Element("C"),
Element("O"),
Element("N"),
Element("H"),
Element("C"),
Element("C"),
Element("C"),
Element("O"),
Element("N") ]
# Now assert that the elements are correct.
for i, atom in enumerate(s.molecule(MolIdx(0)).atoms()):
assert atom.property("element") == elements[i]
if __name__ == "__main__":
test_read_write(True)
test_atom_coords(True)
test_psf_companion(True)
|
michellab/SireUnitTests
|
unittests/SireIO/test_pdb2.py
|
Python
|
gpl-3.0
| 4,335
|
[
"NAMD"
] |
cc415d5fcdf2e816cbc12dfa82aaa45e8b25f9f388e55c733428c1e602d2a34d
|
# -*- mode: python; coding: utf-8 -*-
# Copyright 2012-2015 Peter Williams <peter@newton.cx> and collaborators.
# Licensed under the MIT License.
"""pwkit.ellipses - utilities for manipulating 2D Gaussians and ellipses
XXXXXXX
XXX this code is in an incomplete state of being vectorized!!!
XXXXXXX
Useful for sources and bivariate error distributions. We can express the shape
of the function in several ways, which have different strengths and
weaknesses:
* "biv", as in Gaussian bivariate: sigma x, sigma y, cov(x,y)
* "ell", as in ellipse: major, minor, PA [*]
* "abc": coefficients such that z = exp (ax² + bxy + cy²)
[*] Any slice through a 2D Gaussian is an ellipse. Ours is defined such that it is
the same as a Gaussian bivariate when major = minor.
Note that when considering astronomical position angles, conventionally
defined as East from North, the Dec/lat axis should be considered the X axis
and the RA/long axis should be considered the Y axis.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
__all__ = str ('''F2S S2F sigmascale clscale bivell bivnorm bivabc databiv bivrandom bivplot
ellnorm ellpoint elld2 ellbiv ellabc ellplot abcell abcd2
abcplot''').split ()
import numpy as np
from .numutil import broadcastize
# Some utilities for scaling ellipse axis lengths
F2S = 1 / np.sqrt (8 * np.log (2)) # FWHM to sigma; redundant w/ astutil
S2F = np.sqrt (8 * np.log (2))
@broadcastize(1)
def sigmascale (nsigma):
"""Say we take a Gaussian bivariate and convert the parameters of the
distribution to an ellipse (major, minor, PA). By what factor should we
scale those axes to make the area of the ellipse correspond to the n-sigma
confidence interval?
Negative or zero values result in NaN.
"""
from scipy.special import erfc
return np.sqrt (-2 * np.log (erfc (nsigma / np.sqrt (2))))
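# Added example (rounded values): for the classic 1-sigma case,
# erfc(1/sqrt(2)) ~ 0.3173, so the axes must be scaled by
# sqrt(-2*ln(0.3173)) ~ 1.515 for the ellipse to enclose ~68.3% of the
# probability mass, e.g.:
# >>> sigmascale(1.0)  # ~ 1.515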
@broadcastize(1)
def clscale (cl):
"""Say we take a Gaussian bivariate and convert the parameters of the
distribution to an ellipse (major, minor, PA). By what factor should we
scale those axes to make the area of the ellipse correspond to the
confidence interval CL? (I.e. 0 < CL < 1)
"""
rv = np.sqrt (-2 * np.log (1 - cl))
rv[np.where (cl <= 0)] = np.nan
return rv
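# Added example (rounded values): a 95% confidence contour needs axes
# sqrt(-2*ln(1 - 0.95)) ~ 2.448 times larger than the 1-sigma ellipse, e.g.:
# >>> clscale(0.95)  # ~ 2.448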
# Bivariate form: sigma x, sigma y, cov(x,y)
def _bivcheck (sx, sy, cxy):
if sx <= 0:
raise ValueError ('negative sx (%.10e)' % sx)
if sy <= 0:
raise ValueError ('negative sy (%.10e)' % sy)
if abs (cxy) >= sx * sy:
raise ValueError ('illegal covariance (sx=%.10e, sy=%.10e, cxy=%.10e, '
'cxy/sxsy=%.16f)' % (sx, sy, cxy, cxy / (sx * sy)))
return sx, sy, cxy # convenience
def bivell (sx, sy, cxy):
"""Given the parameters of a Gaussian bivariate distribution, compute the
parameters for the equivalent 2D Gaussian in ellipse form (major, minor,
pa).
Inputs:
* sx: standard deviation (not variance) of x var
* sy: standard deviation (not variance) of y var
* cxy: covariance (not correlation coefficient) of x and y
Outputs:
* mjr: major axis of equivalent 2D Gaussian (sigma, not FWHM)
* mnr: minor axis
* pa: position angle, rotating from +x to +y
Lots of sanity-checking because it's obnoxiously easy to have numerics
that just barely blow up on you.
"""
# See CfA notebook #1 pp. 129-133.
_bivcheck (sx, sy, cxy)
from numpy import arctan2, sqrt
sx2, sy2, cxy2 = sx**2, sy**2, cxy**2
pa = 0.5 * arctan2 (2 * cxy, sx2 - sy2)
h = sqrt ((sx2 - sy2)**2 + 4*cxy2)
t = 2 * (sx2 * sy2 - cxy2) / (sx2 + sy2 - h)
if t < 0:
raise ValueError ('covariance just barely out of bounds [1] '
'(sx=%.10e, sy=%.10e, cxy=%.10e, cxy/sxsy=%.16f)' %
(sx, sy, cxy, cxy / (sx * sy)))
mjr = sqrt (t)
t = 2 * (sx2 * sy2 - cxy2) / (sx2 + sy2 + h)
if t < 0: # if we got this far, shouldn't happen, but ...
raise ValueError ('covariance just barely out of bounds [2] '
'(sx=%.10e, sy=%.10e, cxy=%.10e, cxy/sxsy=%.16f)' %
(sx, sy, cxy, cxy / (sx * sy)))
mnr = sqrt (t)
return ellnorm (mjr, mnr, pa)
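# Added example: an axis-aligned bivariate with sx=2, sy=1, cxy=0 gives
# h = 3 and t-roots 4 and 1, i.e. an ellipse with major=2, minor=1, pa=0:
# >>> bivell(2.0, 1.0, 0.0)  # ~ (2.0, 1.0, 0.0)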
def bivnorm (sx, sy, cxy):
"""Given the parameters of a Gaussian bivariate distribution, compute the
correct normalization for the equivalent 2D Gaussian. It's 1 / (2 pi sqrt
(sx**2 sy**2 - cxy**2)). This function adds a lot of sanity checking.
Inputs:
* sx: standard deviation (not variance) of x var
* sy: standard deviation (not variance) of y var
* cxy: covariance (not correlation coefficient) of x and y
Returns: the scalar normalization
"""
_bivcheck (sx, sy, cxy)
from numpy import pi, sqrt
t = (sx * sy)**2 - cxy**2
if t <= 0:
raise ValueError ('covariance just barely out of bounds '
'(sx=%.10e, sy=%.10e, cxy=%.10e, cxy/sxsy=%.16f)' %
(sx, sy, cxy, cxy / (sx * sy)))
return (2 * pi * sqrt (t))**-1
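# Added example: for the unit circular Gaussian (sx=sy=1, cxy=0) this is the
# familiar 1/(2*pi):
# >>> bivnorm(1.0, 1.0, 0.0)  # ~ 0.15915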
def bivabc (sx, sy, cxy):
"""Compute nontrivial parameters for evaluating a bivariate distribution
as a 2D Gaussian. Inputs:
* sx: standard deviation (not variance) of x var
* sy: standard deviation (not variance) of y var
* cxy: covariance (not correlation coefficient) of x and y
Returns: (a, b, c), where z = k exp (ax² + bxy + cy²)
The proper value for k can be obtained from bivnorm().
"""
_bivcheck (sx, sy, cxy)
sx2, sy2, cxy2 = sx**2, sy**2, cxy**2
t = 1. / (sx2 * sy2 - cxy2)
if t <= 0:
raise ValueError ('covariance just barely out of bounds '
'(sx=%.10e, sy=%.10e, cxy=%.10e, cxy/sxsy=%.16f)' %
(sx, sy, cxy, cxy / (sx * sy)))
a = -0.5 * sy2 * t
c = -0.5 * sx2 * t
b = cxy * t
_abccheck (a, b, c)
return a, b, c
def databiv (xy, coordouter=False, **kwargs):
"""Compute the main parameters of a bivariate distribution from data. The
parameters are returned in the same format as used in the rest of this
module.
* xy: a 2D data array of shape (2, nsamp) or (nsamp, 2)
* coordouter: if True, the coordinate axis is the outer axis; i.e.
the shape is (2, nsamp). Otherwise, the coordinate axis is the
inner axis; i.e. shape is (nsamp, 2).
Returns: (sx, sy, cxy)
In both cases, the first slice along the coordinate axis gives the X data
(i.e., xy[0] or xy[:,0]) and the second slice gives the Y data (xy[1] or
xy[:,1]).
"""
xy = np.asarray (xy)
if xy.ndim != 2:
raise ValueError ('"xy" must be a 2D array')
if coordouter:
if xy.shape[0] != 2:
raise ValueError ('if "coordouter" is True, first axis of "xy" '
'must have size 2')
else:
if xy.shape[1] != 2:
raise ValueError ('if "coordouter" is False, second axis of "xy" '
'must have size 2')
cov = np.cov (xy, rowvar=coordouter, **kwargs)
sx, sy = np.sqrt (np.diag (cov))
cxy = cov[0,1]
return _bivcheck (sx, sy, cxy)
def bivrandom (x0, y0, sx, sy, cxy, size=None):
"""Compute random values distributed according to the specified bivariate
distribution.
Inputs:
* x0: the center of the x distribution (i.e. its intended mean)
* y0: the center of the y distribution
* sx: standard deviation (not variance) of x var
* sy: standard deviation (not variance) of y var
* cxy: covariance (not correlation coefficient) of x and y
* size (optional): the number of values to compute
Returns: array of shape (size, 2); or just (2, ), if size was not
specified.
The bivariate parameters of the generated data are approximately
recoverable by calling 'databiv(retval)'.
"""
from numpy.random import multivariate_normal as mvn
p0 = np.asarray ([x0, y0])
cov = np.asarray ([[sx**2, cxy],
[cxy, sy**2]])
return mvn (p0, cov, size)
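# Added example: drawing many samples and passing them back through databiv()
# should roughly recover the input parameters:
# >>> pts = bivrandom(0., 0., 2., 1., 0.5, size=100000)
# >>> databiv(pts)  # ~ (2.0, 1.0, 0.5)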
def bivconvolve (sx_a, sy_a, cxy_a, sx_b, sy_b, cxy_b):
"""Given two independent bivariate distributions, compute a bivariate
distribution corresponding to their convolution.
I'm sure this is worked out in a ton of places, but I got the equations
from Pineau+ (2011A&A...527A.126P).
Returns: (sx_c, sy_c, cxy_c), the parameters of the convolved
distribution.
"""
_bivcheck (sx_a, sy_a, cxy_a)
_bivcheck (sx_b, sy_b, cxy_b)
sx_c = np.sqrt (sx_a**2 + sx_b**2)
sy_c = np.sqrt (sy_a**2 + sy_b**2)
cxy_c = cxy_a + cxy_b
return _bivcheck (sx_c, sy_c, cxy_c)
def bivplot (sx, sy, cxy, **kwargs):
_bivcheck (sx, sy, cxy)
return ellplot (*bivell (sx, sy, cxy), **kwargs)
# Ellipse form: major, minor, pa
def _ellcheck (mjr, mnr, pa):
if mjr <= 0:
raise ValueError ('mjr must be positive (%.10e)' % mjr)
if mnr <= 0:
raise ValueError ('mnr must be positive (%.10e)' % mnr)
if mnr > mjr:
raise ValueError ('mnr must be less than mjr (mnr=%.10e, '
'mjr=%.10e)' % (mnr, mjr))
return mjr, mnr, pa
@broadcastize (3, ret_spec=(0, 0, 0))
def ellnorm (mjr, mnr, pa):
bad = (mjr <= 0) | (mnr <= 0)
half_pi = 0.5 * np.pi
# swap major and minor if minor is bigger
swap = np.where (mnr > mjr)
temp = mnr[swap]
mnr[swap] = mjr[swap]
mjr[swap] = temp
pa[swap] += half_pi
# center PA in [-pi/2, +pi/2]
pa = ((pa + half_pi) % np.pi) - half_pi
mjr[bad] = np.nan
mnr[bad] = np.nan
pa[bad] = np.nan
return mjr, mnr, pa
def ellpoint (mjr, mnr, pa, th):
"""Compute a point on an ellipse parametrically. Inputs:
* mjr: major axis (sigma not FWHM) of the ellipse
* mnr: minor axis (sigma not FWHM) of the ellipse
* pa: position angle (from +x to +y) of the ellipse, radians
* th: the parameter, 0 <= th < 2pi: the eccentric anomaly
Returns: (x, y)
th may be a vector, in which case x and y will be as well.
"""
_ellcheck (mjr, mnr, pa)
from numpy import cos, sin
ct, st = cos (th), sin (th)
cp, sp = cos (pa), sin (pa)
x = mjr * cp * ct - mnr * sp * st
y = mjr * sp * ct + mnr * cp * st
return x, y
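# Added example: with pa=0 the point at th=0 lies on the major axis and the
# point at th=pi/2 lies on the minor axis:
# >>> ellpoint(2.0, 1.0, 0.0, 0.0)        # -> (2.0, 0.0)
# >>> ellpoint(2.0, 1.0, 0.0, np.pi / 2)  # -> (~0.0, 1.0)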
def elld2 (x0, y0, mjr, mnr, pa, x, y):
"""Given a 2D Gaussian expressed as an ellipse (major, minor, pa), compute a
"squared distance parameter" such that
z = exp (-0.5 * d2)
Inputs:
* x0: position of Gaussian center on x axis
* y0: position of Gaussian center on y axis
* mjr: major axis (sigma not FWHM) of the Gaussian
* mnr: minor axis (sigma not FWHM) of the Gaussian
* pa: position angle (from +x to +y) of the Gaussian, radians
* x: x coordinates of the locations for which to evaluate d2
* y: y coordinates of the locations for which to evaluate d2
Returns: d2, distance parameter defined as above.
x0, y0, mjr, and mnr may be in any units so long as they're consistent. x
and y may be arrays (of the same shape), in which case d2 will be an array
as well.
"""
_ellcheck (mjr, mnr, pa)
dx, dy = x - x0, y - y0
c, s = np.cos (pa), np.sin (pa)
a = c * dx + s * dy
b = -s * dx + c * dy
return (a / mjr)**2 + (b / mnr)**2
def ellbiv (mjr, mnr, pa):
"""Given a 2D Gaussian expressed as an ellipse (major, minor, pa), compute the
equivalent parameters for a Gaussian bivariate distribution. We assume
that the ellipse is normalized so that the functions evaluate identically
for major = minor.
Inputs:
* mjr: major axis (sigma not FWHM) of the Gaussian
* mnr: minor axis (sigma not FWHM) of the Gaussian
* pa: position angle (from +x to +y) of the Gaussian, radians
Returns:
* sx: standard deviation (not variance) of x var
* sy: standard deviation (not variance) of y var
* cxy: covariance (not correlation coefficient) of x and y
"""
_ellcheck (mjr, mnr, pa)
cpa, spa = np.cos (pa), np.sin (pa)
q = np.asarray ([[cpa, -spa],
[spa, cpa]])
cov = np.dot (q, np.dot (np.diag ([mjr**2, mnr**2]), q.T))
sx = np.sqrt (cov[0,0])
sy = np.sqrt (cov[1,1])
cxy = cov[0,1]
return _bivcheck (sx, sy, cxy)
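# Added example: ellbiv() is the inverse of bivell() above, so the
# axis-aligned case round-trips cleanly:
# >>> ellbiv(2.0, 1.0, 0.0)  # ~ (2.0, 1.0, 0.0), matching bivell(2, 1, 0)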
def ellabc (mjr, mnr, pa):
"""Given a 2D Gaussian expressed as an ellipse (major, minor, pa), compute the
nontrivial parameters for its evaluation.
* mjr: major axis (sigma not FWHM) of the Gaussian
* mnr: minor axis (sigma not FWHM) of the Gaussian
* pa: position angle (from +x to +y) of the Gaussian, radians
Returns: (a, b, c), where z = exp (ax² + bxy + cy²)
"""
_ellcheck (mjr, mnr, pa)
cpa, spa = np.cos (pa), np.sin (pa)
mjrm2, mnrm2 = mjr**-2, mnr**-2
a = -0.5 * (cpa**2 * mjrm2 + spa**2 * mnrm2)
c = -0.5 * (spa**2 * mjrm2 + cpa**2 * mnrm2)
b = cpa * spa * (mnrm2 - mjrm2)
_abccheck (a, b, c)
return a, b, c
def double_ell_distance (mjr0, mnr0, pa0, mjr1, mnr1, pa1, dx, dy):
"""Given two ellipses separated by *dx* and *dy*, compute their separation in
terms of σ. Based on Pineau et al (2011A&A...527A.126P).
The "0" ellipse is taken to be centered at (0, 0), while the "1"
ellipse is centered at (dx, dy).
"""
# 1. We need to rotate the frame so that ellipse 1 lies on the X axis.
theta = -np.arctan2 (dy, dx)
# 2. We also need to express these rotated ellipses in "biv" format.
sx0, sy0, cxy0 = ellbiv (mjr0, mnr0, pa0 + theta)
sx1, sy1, cxy1 = ellbiv (mjr1, mnr1, pa1 + theta)
# 3. Their convolution is:
sx, sy, cxy = bivconvolve (sx0, sy0, cxy0, sx1, sy1, cxy1)
# 4. The separation between the centers is still just:
d = np.sqrt (dx**2 + dy**2)
# 5. The effective sigma in the purely X direction, taking into account
# the covariance term, is:
sigma_eff = sx * np.sqrt (1 - (cxy / (sx * sy))**2)
# 6. Therefore the answer is:
return d / sigma_eff
def ellplot (mjr, mnr, pa):
"""Utility for debugging."""
_ellcheck (mjr, mnr, pa)
import omega as om
th = np.linspace (0, 2 * np.pi, 200)
x, y = ellpoint (mjr, mnr, pa, th)
return om.quickXY (x, y, 'mjr=%f mnr=%f pa=%f' %
(mjr, mnr, pa * 180 / np.pi))
# "ABC" form (maybe better called polynomial form): exp (Ax² + Bxy + Cy²)
@broadcastize (3)
def _abccheck (a, b, c):
"This returns a boolean array; True indicates bad values."
return (a >= 0) | (c >= 0) | (b**2 >= 4 * a * c)
@broadcastize (3, ret_spec=(0, 0, 0))
def abcell (a, b, c):
"""Given the nontrivial parameters for evaluating a 2D Gaussian as a
polynomial, compute the equivalent ellipse parameters (major, minor, pa)
Inputs: (a, b, c), where z = exp (ax² + bxy + cy²)
Returns:
* mjr: major axis (sigma not FWHM) of the Gaussian
* mnr: minor axis (sigma not FWHM) of the Gaussian
* pa: position angle (from +x to +y) of the Gaussian, radians
"""
from numpy import arctan2, sqrt
bad = _abccheck (a, b, c)
pa = 0.5 * arctan2 (b, a - c)
t1 = np.sqrt ((a - c)**2 + b**2)
t2 = -t1 - a - c
bad |= (t2 <= 0)
mjr = t2**-0.5
t2 = t1 - a - c
bad |= (t2 <= 0)
mnr = t2**-0.5
w = np.where (bad)
mjr[w] = np.nan
mnr[w] = np.nan
pa[w] = np.nan
return ellnorm (mjr, mnr, pa)
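# Added example: for major=2, minor=1, pa=0 the polynomial coefficients are
# (a, b, c) = (-0.125, 0.0, -0.5), and feeding them to abcell() recovers the
# original ellipse:
# >>> abcell(-0.125, 0.0, -0.5)  # ~ (2.0, 1.0, 0.0)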
def abcd2 (x0, y0, a, b, c, x, y):
"""Given a 2D Gaussian expressed as the ABC polynomial coefficients, compute
a "squared distance parameter" such that
z = exp (-0.5 * d2)
Inputs:
* x0: position of Gaussian center on x axis
* y0: position of Gaussian center on y axis
* a: such that z = exp (ax² + bxy + cy²)
* b: see above
* c: see above
* x: x coordinates of the locations for which to evaluate d2
* y: y coordinates of the locations for which to evaluate d2
Returns: d2, distance parameter defined as above.
This is pretty trivial.
"""
_abccheck (a, b, c)
dx, dy = x - x0, y - y0
return -2 * (a * dx**2 + b * dx * dy + c * dy**2)
def abcplot (a, b, c, **kwargs):
_abccheck (a, b, c)
return ellplot (*abcell (a, b, c), **kwargs)
|
pkgw/pwkit
|
pwkit/ellipses.py
|
Python
|
mit
| 16,299
|
[
"Gaussian"
] |
123c47d036df0ecd0cf5d08bc00cde8368bc7e84b3b3b75b6a3daeecaf1df8fe
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module contains the error classes for the chemenv package.
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
class AbstractChemenvError(Exception):
def __init__(self, cls, method, msg):
self.cls = cls
self.method = method
self.msg = msg
def __str__(self):
return str(self.cls) + ': ' + self.method + '\n' + repr(self.msg)
class ClassTypeChemenvError(AbstractChemenvError):
def __init__(self, object, classtype):
self.object = object
self.classtype = classtype
def __str__(self):
return '"{}" expected, {} found\n'.format(self.classtype.__name__, self.object.__class__.__name__)
class NeighborsNotComputedChemenvError(AbstractChemenvError):
def __init__(self, site):
self.site = site
def __str__(self):
return 'The neighbors were not computed for the following site : \n' + str(self.site)
class BVAValencesNotFoundChemenvError(AbstractChemenvError):
def __init__(self, structure):
self.structure = structure
def __str__(self):
return 'The valences were not found for the following structure : \n' +\
self.structure.composition.reduced_formula
class ChemenvStrategyError(AbstractChemenvError):
def __init__(self, cls, method, msg):
self.cls = cls
self.method = method
self.msg = msg
def __str__(self):
return str(self.cls) + ': ' + self.method + '\n' + repr(self.msg)
class InitializationChemenvError(AbstractChemenvError):
def __init__(self, cls):
self.cls = cls
def __str__(self):
return 'There are missing arguments for the initialization of a {} object'.format(self.cls)
class EquivalentSiteSearchError(AbstractChemenvError):
def __init__(self, site):
self.site = site
def __str__(self):
return 'Equivalent site could not be found for the following site : {}'.format(str(self.site))
class VoronoiParametersError(AbstractChemenvError):
def __init__(self, vp):
self.vp = vp
def __str__(self):
return 'The list of Voronoi parameters does not contain the following set of parameters :\n' \
' - distfactor : {},\n' \
' - angfactor : {},\n' \
' - only_anion_cation_bond : {}'.format(self.vp.distance_parameter, self.vp.angle_parameter,
self.vp.only_anion_cation_bond)
class SolidAngleError(AbstractChemenvError):
def __init__(self, cosinus):
self.cosinus = cosinus
def __str__(self):
return 'Value of cosinus ({}) from which an angle should be retrieved ' \
'is not between -1.0 and 1.0'.format(self.cosinus)
class RatioFunctionError(AbstractChemenvError):
def __init__(self, function):
self.function = function
def __str__(self):
return 'Function "{}" is not allowed as a ratio function.'.format(self.function)
class PenaltyFunctionError(AbstractChemenvError):
def __init__(self, function):
self.function = function
def __str__(self):
return 'Function "{}" is not possible.'.format(self.function)
class ChemenvError(Exception):
def __init__(self, cls, method, msg):
self.cls = cls
self.method = method
self.msg = msg
def __str__(self):
return str(self.cls) + ': ' + self.method + '\n' + repr(self.msg)
|
matk86/pymatgen
|
pymatgen/analysis/chemenv/utils/chemenv_errors.py
|
Python
|
mit
| 3,780
|
[
"pymatgen"
] |
092e170f9327da6093e4b455bec47367809813a21527a0a4dbe66ee033835fe1
|
# Copyright (C) 2014 LiuLang <gsushzhsosgsu@gmail.com>
# Use of this source code is governed by GPLv3 license that can be found
# in http://www.gnu.org/licenses/gpl-3.0.html
'''
Used to display desktop (OSD) lyrics.
'''
import json
import os
import sys
import time
import traceback
import cairo
from gi.repository import Gdk
from gi.repository import GLib
from gi.repository import Gtk
from kuwo import Config
_ = Config._
from kuwo import Widgets
from kuwo.log import logger
ACTIVATE = 'activated'
SIZE_MAX = 72
SIZE_MIN = 4
SHADOW_SIZE_MIN = 0
SHADOW_SIZE_MAX = 10
SHADOW_RADIUS_MIN = 0
SHADOW_RADIUS_MAX = 20
HIDE_TOOLBAR_AFTER = 2000 # 2 secs
class RightLabel(Gtk.Label):
def __init__(self, label):
super().__init__(label)
self.props.halign = Gtk.Align.END
self.props.xalign = 1
class OSDLrc(Gtk.ApplicationWindow):
def __init__(self, app):
super().__init__(application=app.app, type=Gtk.WindowType.POPUP)
self.props.decorated = False
self.props.resizable = False
self.app = app
self.has_shown = False
self.old_provider = None
# set main window opacity
screen = self.get_screen()
self.root_window = screen.get_root_window()
visual = screen.get_rgba_visual()
if visual and screen.is_composited():
self.set_visual(visual)
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0)
self.add(box)
self.da = Gtk.Label()
self.da.old_provider = None
self.da.props.xalign = 0
box.pack_start(self.da, False, False, 0)
self.da2 = Gtk.Label()
self.da2.old_provider = None
self.da2.props.xalign = 0
box.pack_start(self.da2, False, False, 0)
self.da3 = Gtk.Label()
self.da3.old_provider = None
self.da3.props.xalign = 0
box.pack_start(self.da3, False, False, 0)
self.toolbar = Gtk.Toolbar()
self.toolbar.set_style(Gtk.ToolbarStyle.ICONS)
#self.toolbar.get_style_context().add_class(Gtk.STYLE_CLASS_TOOLBAR)
self.toolbar.set_show_arrow(False)
self.toolbar.set_icon_size(Gtk.IconSize.LARGE_TOOLBAR)
box.pack_start(self.toolbar, False, False, 0)
# Mouse press / drag-and-move events
self.add_events(Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.BUTTON_PRESS_MASK |
#Gdk.EventMask.BUTTON_MOTION_MASK |
#Gdk.EventMask.POINTER_MOTION_MASK |
Gdk.EventMask.POINTER_MOTION_HINT_MASK |
Gdk.EventMask.ENTER_NOTIFY_MASK |
Gdk.EventMask.LEAVE_NOTIFY_MASK)
self.mouse_pressed = False
prev_button = Gtk.ToolButton()
prev_button.set_label(_('Previous'))
prev_button.set_icon_name('media-skip-backward-symbolic')
prev_button.connect('clicked', self.on_prev_button_clicked)
self.toolbar.insert(prev_button, 0)
self.play_button = Gtk.ToolButton()
self.toolbar.insert(self.play_button, 1)
next_button = Gtk.ToolButton()
next_button.set_label(_('Next'))
next_button.set_icon_name('media-skip-forward-symbolic')
next_button.connect('clicked', self.on_next_button_clicked)
self.toolbar.insert(next_button, 2)
zoom_in_button = Gtk.ToolButton()
zoom_in_button.set_label(_('Zoom In'))
zoom_in_button.set_icon_name('zoom-in-symbolic')
zoom_in_button.connect('clicked', self.on_zoom_in_button_clicked)
self.toolbar.insert(zoom_in_button, 3)
zoom_out_button = Gtk.ToolButton()
zoom_out_button.set_label(_('Zoom Out'))
zoom_out_button.set_icon_name('zoom-out-symbolic')
zoom_out_button.connect('clicked', self.on_zoom_out_button_clicked)
self.toolbar.insert(zoom_out_button, 4)
color_button = Gtk.ToolButton()
color_button.set_label(_('Styles'))
color_button.set_icon_name('preferences-system-symbolic')
color_button.connect('clicked', self.on_color_button_clicked)
self.toolbar.insert(color_button, 5)
lock_button = Gtk.ToolButton()
lock_button.set_label(_('Lock'))
lock_button.set_icon_name('lock')
lock_button.connect('clicked', self.on_lock_button_clicked)
self.toolbar.insert(lock_button, 6)
close_button = Gtk.ToolButton()
close_button.set_label(_('Close'))
close_button.set_icon_name('window-close-symbolic')
close_button.connect('clicked', self.on_close_button_clicked)
self.toolbar.insert(close_button, 7)
# Action to toggle window visibility
self.show_window_action = Gtk.ToggleAction('show-window-action',
_('Show OSD Window'), _('Show OSD Window'), None)
self.show_window_action.set_icon_name(
'media-view-subtitles-symbolic')
self.show_window_action.connect('toggled',
self.on_show_window_action_toggled)
# Toggle the window locked state
if self.app.conf['osd-locked']:
self.lock_window_action = Gtk.ToggleAction('lock-window-action',
_('UnLock OSD Window'), _('UnLock OSD lyric window'), None)
self.lock_window_action.set_active(True)
else:
self.lock_window_action = Gtk.ToggleAction('lock-window-action',
_('Lock OSD Window'), _('Lock OSD lyric window'), None)
self.lock_window_action.set_active(False)
self.lock_window_action.set_icon_name('lock')
self.lock_window_action.set_sensitive(self.app.conf['osd-show'])
self.lock_window_action.connect('toggled',
self.on_lock_window_action_toggled)
def after_init(self):
self.update_style()
if self.app.conf['osd-show']:
self.show_window_action.set_active(True)
self.play_button.props.related_action = self.app.player.playback_action
self.arrow_cursor = Gdk.Cursor(Gdk.CursorType.ARROW)
self.fleur_cursor = Gdk.Cursor(Gdk.CursorType.FLEUR)
def update_style(self):
conf = self.app.conf
if Config.GTK_LE_36:
css = '\n'.join([
'GtkWindow {',
'background-color:{0};'.format(
conf['osd-background-color']),
'}',
'.activated {',
'color: {0};'.format(conf['osd-activated-color']),
'font-size: {0};'.format(conf['osd-activated-size']),
'text-shadow: {0} {1} {2} {3};'.format(
conf['osd-activated-shadow-x'],
conf['osd-activated-shadow-y'],
conf['osd-activated-shadow-radius'],
conf['osd-activated-shadow-color']),
'}',
'GtkLabel {',
'color: {0};'.format(conf['osd-inactivated-color']),
'font-size: {0};'.format(conf['osd-inactivated-size']),
'text-shadow: {0} {1} {2} {3};'.format(
conf['osd-inactivated-shadow-x'],
conf['osd-inactivated-shadow-y'],
conf['osd-inactivated-shadow-radius'],
conf['osd-inactivated-shadow-color']),
'}',
])
else:
css = '\n'.join([
'GtkWindow {',
'background-color:{0};'.format(
conf['osd-background-color']),
'}',
'.activated {',
'color: {0};'.format(conf['osd-activated-color']),
'font-size: {0}px;'.format(conf['osd-activated-size']),
'text-shadow: {0}px {1}px {2}px {3};'.format(
conf['osd-activated-shadow-x'],
conf['osd-activated-shadow-y'],
conf['osd-activated-shadow-radius'],
conf['osd-activated-shadow-color']),
'}',
'GtkLabel {',
'color: {0};'.format(conf['osd-inactivated-color']),
'font-size: {0}px;'.format(conf['osd-inactivated-size']),
'transition-property: font-size;',
'transition: 200ms ease-in;',
'text-shadow: {0}px {1}px {2}px {3};'.format(
conf['osd-inactivated-shadow-x'],
conf['osd-inactivated-shadow-y'],
conf['osd-inactivated-shadow-radius'],
conf['osd-inactivated-shadow-color']),
'}',
])
self.old_provider = Widgets.apply_css(self, css,
old_provider=self.old_provider)
self.da.old_provider = Widgets.apply_css(self.da, css,
old_provider=self.da.old_provider)
self.da2.old_provider = Widgets.apply_css(self.da2, css,
old_provider=self.da2.old_provider)
if self.app.conf['osd-three']:
self.da3.show_all()
self.da3.old_provider = Widgets.apply_css(self.da3, css,
old_provider=self.da3.old_provider)
self.da.get_style_context().remove_class(ACTIVATE)
self.da2.get_style_context().add_class(ACTIVATE)
self.da3.get_style_context().remove_class(ACTIVATE)
else:
self.da3.hide()
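# Added illustration (hypothetical config values, not taken from this file):
# with conf['osd-activated-color'] = '#ff6600', conf['osd-activated-size'] = 30
# and shadow settings (1, 1, 2, '#000000'), the non-GTK-3.6 branch above would
# produce a CSS fragment like
#   .activated {
#   color: #ff6600;
#   font-size: 30px;
#   text-shadow: 1px 1px 2px #000000;
#   }
# which update_style() then applies to the window and lyric labels via
# Widgets.apply_css().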
def set_lrc(self, lrc_obj):
self.lrc_obj = lrc_obj
if not lrc_obj:
self.da.set_text('No lyric available')
def sync_lrc(self, line_num):
'''Synchronize the displayed lyric line'''
if not self.lrc_obj or line_num >= len(self.lrc_obj):
return
if self.app.conf['osd-three']:
if line_num == 0:
self.da.set_text('')
self.da3.set_text(self.lrc_obj[line_num+1][1])
elif line_num == len(self.lrc_obj) -1:
self.da.set_text(self.lrc_obj[line_num-1][1])
self.da3.set_text('')
else:
self.da.set_text(self.lrc_obj[line_num-1][1])
self.da3.set_text(self.lrc_obj[line_num+1][1])
self.da2.set_text(self.lrc_obj[line_num][1])
else:
if line_num == 0:
self.da.set_text(self.lrc_obj[0][1])
self.da2.set_text(self.lrc_obj[1][1])
self.da.get_style_context().add_class(ACTIVATE)
elif line_num % 2 == 1:
next_line = line_num + 1
if next_line < len(self.lrc_obj):
self.da.set_text(self.lrc_obj[next_line][1])
self.da.get_style_context().remove_class(ACTIVATE)
self.da2.set_text(self.lrc_obj[line_num][1])
self.da2.get_style_context().add_class(ACTIVATE)
else:
next_line = line_num + 1
if next_line < len(self.lrc_obj):
self.da2.set_text(self.lrc_obj[next_line][1])
self.da2.get_style_context().remove_class(ACTIVATE)
self.da.set_text(self.lrc_obj[line_num][1])
self.da.get_style_context().add_class(ACTIVATE)
def reload(self):
'''Reset window properties, then redraw'''
if self.app.conf['osd-locked']:
self.toolbar.hide()
try:
region = cairo.Region()
except AttributeError:
print('cairo.Region is missing, a patch is required:',
'http://bugs.debian.org/688079')
logger.error(traceback.format_exc())
return
self.input_shape_combine_region(region)
else:
self.toolbar.show_all()
self.app.conf['osd-toolbar-y'] = self.toolbar.get_allocated_height()
self.auto_hide_toolbar()
self.input_shape_combine_region(None)
self.move(self.app.conf['osd-x'], self.app.conf['osd-y'])
def auto_hide_toolbar(self):
def hide_toolbar(timestamp):
if timestamp == self.toolbar.timestamp:
self.toolbar.timestamp = 0
self.toolbar.hide()
if self.toolbar.get_visible():
timestamp = time.time()
self.toolbar.timestamp = timestamp
GLib.timeout_add(HIDE_TOOLBAR_AFTER, hide_toolbar, timestamp)
def show_window(self, show):
'''Show or hide the lyric window'''
self.app.conf['osd-show'] = show
if show:
if self.has_shown:
self.present()
else:
self.has_shown = True
self.show_all()
self.reload()
else:
self.hide()
def lock_window(self, locked):
if not self.app.conf['osd-show']:
return
self.app.conf['osd-locked'] = locked
mapped = self.get_mapped()
realized = self.get_realized()
if mapped:
self.unmap()
if realized:
self.unrealize()
if locked:
# Note that gdk_window_set_type_hint() must be set before the
# gdk window is mapped
self.props.type_hint = Gdk.WindowTypeHint.DOCK
else:
self.props.type_hint = Gdk.WindowTypeHint.NORMAL
if realized:
self.realize()
if mapped:
self.map()
self.queue_resize()
self.reload()
def on_prev_button_clicked(self, button):
self.app.player.load_prev()
def on_next_button_clicked(self, button):
self.app.player.load_next()
def on_zoom_in_button_clicked(self, button):
if self.app.conf['osd-inactivated-size'] < SIZE_MAX:
self.app.conf['osd-inactivated-size'] += 1
if self.app.conf['osd-activated-size'] < SIZE_MAX:
self.app.conf['osd-activated-size'] += 1
self.update_style()
def on_zoom_out_button_clicked(self, button):
if self.app.conf['osd-inactivated-size'] > SIZE_MIN:
self.app.conf['osd-inactivated-size'] -= 1
if self.app.conf['osd-activated-size'] > SIZE_MIN:
self.app.conf['osd-activated-size'] -= 1
self.update_style()
def on_color_button_clicked(self, button):
dialog = PreferencesDialog(self)
dialog.run()
dialog.destroy()
def on_lock_button_clicked(self, button):
self.lock_window_action.set_active(True)
def on_close_button_clicked(self, button):
self.show_window_action.set_active(False)
def on_show_window_action_toggled(self, action):
status = action.get_active()
self.show_window(status)
self.lock_window_action.set_sensitive(status)
if status:
action.set_label(_('Hide OSD Window'))
action.set_tooltip(_('Hide OSD Window'))
else:
action.set_label(_('Show OSD Window'))
action.set_tooltip(_('Show OSD Window'))
def on_lock_window_action_toggled(self, action):
if not self.app.conf['osd-show']:
return
self.lock_window(action.get_active())
if action.get_active():
action.set_label(_('UnLock OSD Window'))
else:
action.set_label(_('Lock OSD Window'))
# The following two events are used to auto-hide the toolbar
def do_enter_notify_event(self, event):
self.toolbar.show_all()
def do_leave_notify_event(self, event):
self.auto_hide_toolbar()
# The following events handle dragging the window to move it
def do_button_press_event(self, event):
self.mouse_pressed = True
self.start_x, self.start_y = event.x, event.y
self.get_window().set_cursor(self.fleur_cursor)
def do_button_release_event(self, event):
self.app.conf['osd-x'], self.app.conf['osd-y'] = self.get_position()
self.mouse_pressed = False
self.get_window().set_cursor(self.arrow_cursor)
def do_motion_notify_event(self, event):
if not self.mouse_pressed:
return
x = int(event.x_root - self.start_x)
y = int(event.y_root - self.start_y)
self.move(x, y)
class PreferencesDialog(Gtk.Dialog):
def __init__(self, parent):
super().__init__(_('OSD Styles'), parent.app.window, 0,
(Gtk.STOCK_CLOSE, Gtk.ResponseType.CLOSE))
self.parent = parent
self.set_modal(False)
self.set_default_size(480, 320)
self.set_border_width(15)
box = self.get_content_area()
# Background
box.pack_start(ColorBox(parent, _('Background color:'),
'osd-background-color'), False, False, 0)
box.pack_start(SwitchBox(parent, _('Display three lines:'),
'osd-three'), False, False, 0)
# Default Label
default_label = Gtk.Label()
default_label.set_markup(_('<b>Default Text Styles</b>'))
default_label.props.xalign = 0
default_label.props.margin_top = 15
box.pack_start(default_label, False, False, 0)
box.pack_start(SpinBox(parent, _('Font size:'),
'osd-inactivated-size', SIZE_MIN, SIZE_MAX),
False, False, 0)
box.pack_start(ColorBox(parent, _('Color:'),
'osd-inactivated-color'), False, False, 0)
box.pack_start(SpinBox(parent, _('Horizontal Shadow:'),
'osd-inactivated-shadow-x', SHADOW_SIZE_MIN,
SHADOW_SIZE_MAX), False, False, 0)
box.pack_start(SpinBox(parent, _('Vertical Shadow:'),
'osd-inactivated-shadow-y', SHADOW_SIZE_MIN,
SHADOW_SIZE_MAX), False, False, 0)
box.pack_start(SpinBox(parent, _('Shadow radius:'),
'osd-inactivated-shadow-radius', SHADOW_RADIUS_MIN,
SHADOW_RADIUS_MAX), False, False, 0)
box.pack_start(ColorBox(parent, _('Shadow color:'),
'osd-inactivated-shadow-color'), False, False, 0)
# Activated Label
activated_label = Gtk.Label()
activated_label.set_markup(_('<b>Activated Text Styles</b>'))
activated_label.props.margin_top = 15
activated_label.props.xalign = 0
box.pack_start(activated_label, False, False, 0)
box.pack_start(SpinBox(parent, _('Font size:'),
'osd-activated-size', SIZE_MIN, SIZE_MAX),
False, False, 0)
box.pack_start(ColorBox(parent, _('Color:'),
'osd-activated-color'), False, False, 0)
box.pack_start(SpinBox(parent, _('Horizontal Shadow:'),
'osd-activated-shadow-x', SHADOW_SIZE_MIN,
SHADOW_SIZE_MAX), False, False, 0)
box.pack_start(SpinBox(parent, _('Vertical Shadow:'),
'osd-activated-shadow-y', SHADOW_SIZE_MIN,
SHADOW_SIZE_MAX), False, False, 0)
box.pack_start(SpinBox(parent, _('Shadow radius:'),
'osd-activated-shadow-radius', SHADOW_RADIUS_MIN,
SHADOW_RADIUS_MAX), False, False, 0)
box.pack_start(ColorBox(parent, _('Shadow color:'),
'osd-activated-shadow-color'), False, False, 0)
box.show_all()
class SwitchBox(Gtk.Box):
def __init__(self, osd, label_name, name):
super().__init__()
self.osd = osd
self.conf = osd.app.conf
self.props.margin_left = 10
label = Gtk.Label(label_name)
self.pack_start(label, False, False, 0)
switch_button = Gtk.Switch()
switch_button.set_active(self.conf[name])
switch_button.props.halign = Gtk.Align.END
switch_button.connect('notify::active',
self.on_switch_button_activated, name)
self.pack_end(switch_button, True, True, 0)
def on_switch_button_activated(self, switch_button, event, name):
self.conf[name] = switch_button.get_active()
self.osd.update_style()
class ColorBox(Gtk.Box):
def __init__(self, osd, label_name, name):
super().__init__()
self.osd = osd
self.conf = osd.app.conf
self.props.margin_left = 10
label = Gtk.Label(label_name)
self.pack_start(label, False, False, 0)
rgba = Gdk.RGBA()
rgba.parse(self.conf[name])
color_button = Gtk.ColorButton.new_with_rgba(rgba)
color_button.props.use_alpha = True
color_button.props.halign = Gtk.Align.END
color_button.connect('color-set', self.on_color_button_set, name)
self.pack_end(color_button, True, True, 0)
def on_color_button_set(self, color_button, name):
color_rgba = color_button.get_rgba()
if color_rgba.alpha == 1:
color_rgba.alpha = 0.999
self.conf[name] = color_rgba.to_string()
self.osd.update_style()
class SpinBox(Gtk.Box):
def __init__(self, osd, label_name, name, min_size, max_size):
super().__init__()
self.osd = osd
self.conf = osd.app.conf
self.props.margin_left = 10
label = Gtk.Label(label_name)
self.pack_start(label, False, False, 0)
spin_button = Gtk.SpinButton.new_with_range(min_size, max_size, 1)
spin_button.set_value(self.conf[name])
spin_button.props.halign = Gtk.Align.END
spin_button.connect('value-changed', self.on_spin_button_changed,
name)
self.pack_end(spin_button, True, True, 0)
def on_spin_button_changed(self, spin_button, name):
self.conf[name] = spin_button.get_value()
self.osd.update_style()
|
lennyhbt/kwplayer
|
kuwo/OSDLrc.py
|
Python
|
gpl-3.0
| 21,651
|
[
"FLEUR"
] |
410a80f8c9d8dc496a100a58c27573869db9bc5fb44df7918482bd63491c8a21
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_serviceengine
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of ServiceEngine Avi RESTful Object
description:
- This module is used to configure ServiceEngine object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
availability_zone:
description:
- Availability_zone of serviceengine.
cloud_ref:
description:
- It is a reference to an object of type cloud.
container_mode:
description:
- Boolean flag to set container_mode.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
container_type:
description:
- Enum options - container_type_bridge, container_type_host, container_type_host_dpdk.
- Default value when not specified in API or module is interpreted by Avi Controller as CONTAINER_TYPE_HOST.
controller_created:
description:
- Boolean flag to set controller_created.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
controller_ip:
description:
- Controller_ip of serviceengine.
data_vnics:
description:
- List of vnic.
enable_state:
description:
- In order to disable the SE, set this field appropriately.
- Enum options - SE_STATE_ENABLED, SE_STATE_DISABLED_FOR_PLACEMENT, SE_STATE_DISABLED.
- Default value when not specified in API or module is interpreted by Avi Controller as SE_STATE_ENABLED.
flavor:
description:
- Flavor of serviceengine.
host_ref:
description:
- It is a reference to an object of type vimgrhostruntime.
hypervisor:
description:
- Enum options - default, vmware_esx, kvm, vmware_vsan, xen.
mgmt_vnic:
description:
- Vnic settings for serviceengine.
name:
description:
- Name of the object.
- Default value when not specified in API or module is interpreted by Avi Controller as VM name unknown.
resources:
description:
- Seresources settings for serviceengine.
se_group_ref:
description:
- It is a reference to an object of type serviceenginegroup.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create ServiceEngine object
avi_serviceengine:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_serviceengine
"""
RETURN = '''
obj:
description: ServiceEngine (api/serviceengine) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
availability_zone=dict(type='str',),
cloud_ref=dict(type='str',),
container_mode=dict(type='bool',),
container_type=dict(type='str',),
controller_created=dict(type='bool',),
controller_ip=dict(type='str',),
data_vnics=dict(type='list',),
enable_state=dict(type='str',),
flavor=dict(type='str',),
host_ref=dict(type='str',),
hypervisor=dict(type='str',),
mgmt_vnic=dict(type='dict',),
name=dict(type='str',),
resources=dict(type='dict',),
se_group_ref=dict(type='str',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'serviceengine',
set([]))
if __name__ == '__main__':
main()
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/avi/avi_serviceengine.py
|
Python
|
bsd-3-clause
| 5,728
|
[
"VisIt"
] |
8ef9cf698533e458235b42d5ef64ea591ae129ecba90e7fcf75b198c0ac2b72c
|
"""Coders for individual Variable objects."""
import warnings
from functools import partial
from typing import Any
import numpy as np
import pandas as pd
from ..core import dtypes, duck_array_ops, indexing
from ..core.pycompat import dask_array_type
from ..core.utils import equivalent
from ..core.variable import Variable
class SerializationWarning(RuntimeWarning):
"""Warnings about encoding/decoding issues in serialization."""
class VariableCoder:
"""Base class for encoding and decoding transformations on variables.
We use coders for transforming variables between xarray's data model and
a format suitable for serialization. For example, coders apply CF
conventions for how data should be represented in netCDF files.
Subclasses should implement encode() and decode(), which should satisfy
the identity ``coder.decode(coder.encode(variable)) == variable``. If any
options are necessary, they should be implemented as arguments to the
__init__ method.
The optional name argument to encode() and decode() exists solely for the
sake of better error messages, and should correspond to the name of
variables in the underlying store.
"""
def encode(self, variable, name=None):
# type: (Variable, Any) -> Variable
"""Convert a decoded variable into an encoded variable."""
raise NotImplementedError
def decode(self, variable, name=None):
# type: (Variable, Any) -> Variable
"""Convert an encoded variable into a decoded variable."""
raise NotImplementedError
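# A minimal sketch (added for illustration, not part of xarray) of a coder
# that satisfies the round-trip identity described above; the attribute name
# 'fake_marker' is invented for the example:
#
# class MarkerCoder(VariableCoder):
#     def encode(self, variable, name=None):
#         dims, data, attrs, encoding = unpack_for_encoding(variable)
#         attrs['fake_marker'] = 1  # stamp a marker attribute on encode
#         return Variable(dims, data, attrs, encoding)
#     def decode(self, variable, name=None):
#         dims, data, attrs, encoding = unpack_for_decoding(variable)
#         attrs.pop('fake_marker', None)  # drop the marker again on decode
#         return Variable(dims, data, attrs, encoding)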
class _ElementwiseFunctionArray(indexing.ExplicitlyIndexedNDArrayMixin):
"""Lazily computed array holding values of elemwise-function.
Do not construct this object directly: call lazy_elemwise_func instead.
Values are computed upon indexing or coercion to a NumPy array.
"""
def __init__(self, array, func, dtype):
assert not isinstance(array, dask_array_type)
self.array = indexing.as_indexable(array)
self.func = func
self._dtype = dtype
@property
def dtype(self):
return np.dtype(self._dtype)
def __getitem__(self, key):
return type(self)(self.array[key], self.func, self.dtype)
def __array__(self, dtype=None):
return self.func(self.array)
def __repr__(self):
return ("%s(%r, func=%r, dtype=%r)" %
(type(self).__name__, self.array, self.func, self.dtype))
def lazy_elemwise_func(array, func, dtype):
"""Lazily apply an element-wise function to an array.
Parameters
----------
array : any valid value of Variable._data
func : callable
Function to apply to indexed slices of an array. For use with dask,
this should be a pickle-able object.
dtype : coercible to np.dtype
Dtype for the result of this function.
Returns
-------
Either a dask.array.Array or _ElementwiseFunctionArray.
"""
if isinstance(array, dask_array_type):
return array.map_blocks(func, dtype=dtype)
else:
return _ElementwiseFunctionArray(array, func, dtype)
def unpack_for_encoding(var):
return var.dims, var.data, var.attrs.copy(), var.encoding.copy()
def unpack_for_decoding(var):
return var.dims, var._data, var.attrs.copy(), var.encoding.copy()
def safe_setitem(dest, key, value, name=None):
if key in dest:
var_str = ' on variable {!r}'.format(name) if name else ''
raise ValueError(
'failed to prevent overwriting existing key {} in attrs{}. '
'This is probably an encoding field used by xarray to describe '
'how a variable is serialized. To proceed, remove this key from '
"the variable's attributes manually.".format(key, var_str))
dest[key] = value
def pop_to(source, dest, key, name=None):
"""
A convenience function which pops a key k from source to dest.
None values are not passed on. If k already exists in dest an
error is raised.
"""
value = source.pop(key, None)
if value is not None:
safe_setitem(dest, key, value, name=name)
return value
def _apply_mask(
data: np.ndarray,
encoded_fill_values: list,
decoded_fill_value: Any,
dtype: Any,
) -> np.ndarray:
"""Mask all matching values in a NumPy array."""
data = np.asarray(data, dtype=dtype)
condition = False
for fv in encoded_fill_values:
condition |= data == fv
return np.where(condition, decoded_fill_value, data)
class CFMaskCoder(VariableCoder):
"""Mask or unmask fill values according to CF conventions."""
def encode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_encoding(variable)
fv = encoding.get('_FillValue')
mv = encoding.get('missing_value')
if fv is not None and mv is not None and not equivalent(fv, mv):
raise ValueError("Variable {!r} has multiple fill values {}. "
"Cannot encode data. "
.format(name, [fv, mv]))
if fv is not None:
fill_value = pop_to(encoding, attrs, '_FillValue', name=name)
if not pd.isnull(fill_value):
data = duck_array_ops.fillna(data, fill_value)
if mv is not None:
fill_value = pop_to(encoding, attrs, 'missing_value', name=name)
if not pd.isnull(fill_value) and fv is None:
data = duck_array_ops.fillna(data, fill_value)
return Variable(dims, data, attrs, encoding)
def decode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_decoding(variable)
raw_fill_values = [pop_to(attrs, encoding, attr, name=name)
for attr in ('missing_value', '_FillValue')]
if raw_fill_values:
encoded_fill_values = {fv for option in raw_fill_values
for fv in np.ravel(option)
if not pd.isnull(fv)}
if len(encoded_fill_values) > 1:
warnings.warn("variable {!r} has multiple fill values {}, "
"decoding all values to NaN."
.format(name, encoded_fill_values),
SerializationWarning, stacklevel=3)
dtype, decoded_fill_value = dtypes.maybe_promote(data.dtype)
if encoded_fill_values:
transform = partial(_apply_mask,
encoded_fill_values=encoded_fill_values,
decoded_fill_value=decoded_fill_value,
dtype=dtype)
data = lazy_elemwise_func(data, transform, dtype)
return Variable(dims, data, attrs, encoding)
def _scale_offset_decoding(data, scale_factor, add_offset, dtype):
data = np.array(data, dtype=dtype, copy=True)
if scale_factor is not None:
data *= scale_factor
if add_offset is not None:
data += add_offset
return data
def _choose_float_dtype(dtype, has_offset):
"""Return a float dtype that can losslessly represent `dtype` values."""
# Keep float32 as-is. Upcast half-precision to single-precision,
# because float16 is "intended for storage but not computation"
if dtype.itemsize <= 4 and np.issubdtype(dtype, np.floating):
return np.float32
# float32 can exactly represent all integers up to 24 bits
if dtype.itemsize <= 2 and np.issubdtype(dtype, np.integer):
# A scale factor is entirely safe (vanishing into the mantissa),
# but a large integer offset could lead to loss of precision.
# Sensitivity analysis can be tricky, so we just use a float64
# if there's any offset at all - better unoptimised than wrong!
if not has_offset:
return np.float32
# For all other types and circumstances, we just use float64.
# (safe because eg. complex numbers are not supported in NetCDF)
return np.float64
class CFScaleOffsetCoder(VariableCoder):
"""Scale and offset variables according to CF conventions.
Follows the formula:
decode_values = encoded_values * scale_factor + add_offset
"""
def encode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_encoding(variable)
if 'scale_factor' in encoding or 'add_offset' in encoding:
dtype = _choose_float_dtype(data.dtype, 'add_offset' in encoding)
data = data.astype(dtype=dtype, copy=True)
if 'add_offset' in encoding:
data -= pop_to(encoding, attrs, 'add_offset', name=name)
if 'scale_factor' in encoding:
data /= pop_to(encoding, attrs, 'scale_factor', name=name)
return Variable(dims, data, attrs, encoding)
def decode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_decoding(variable)
if 'scale_factor' in attrs or 'add_offset' in attrs:
scale_factor = pop_to(attrs, encoding, 'scale_factor', name=name)
add_offset = pop_to(attrs, encoding, 'add_offset', name=name)
dtype = _choose_float_dtype(data.dtype, 'add_offset' in attrs)
transform = partial(_scale_offset_decoding,
scale_factor=scale_factor,
add_offset=add_offset,
dtype=dtype)
data = lazy_elemwise_func(data, transform, dtype)
return Variable(dims, data, attrs, encoding)
class UnsignedIntegerCoder(VariableCoder):
def encode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_encoding(variable)
# from netCDF best practices
# https://www.unidata.ucar.edu/software/netcdf/docs/BestPractices.html
# "_Unsigned = "true" to indicate that
# integer data should be treated as unsigned"
if encoding.get('_Unsigned', 'false') == 'true':
pop_to(encoding, attrs, '_Unsigned')
signed_dtype = np.dtype('i%s' % data.dtype.itemsize)
if '_FillValue' in attrs:
new_fill = signed_dtype.type(attrs['_FillValue'])
attrs['_FillValue'] = new_fill
data = duck_array_ops.around(data).astype(signed_dtype)
return Variable(dims, data, attrs, encoding)
def decode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_decoding(variable)
if '_Unsigned' in attrs:
unsigned = pop_to(attrs, encoding, '_Unsigned')
if data.dtype.kind == 'i':
if unsigned == 'true':
unsigned_dtype = np.dtype('u%s' % data.dtype.itemsize)
transform = partial(np.asarray, dtype=unsigned_dtype)
data = lazy_elemwise_func(data, transform, unsigned_dtype)
if '_FillValue' in attrs:
new_fill = unsigned_dtype.type(attrs['_FillValue'])
attrs['_FillValue'] = new_fill
else:
warnings.warn("variable %r has _Unsigned attribute but is not "
"of integer type. Ignoring attribute." % name,
SerializationWarning, stacklevel=3)
return Variable(dims, data, attrs, encoding)
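# A minimal usage sketch (an assumption for illustration: xarray's Variable constructor
# and the names above are available as in this module; the values are invented):
#
#     var = Variable(('x',), np.array([1, 2, -999], dtype='int16'),
#                    attrs={'_FillValue': -999})
#     decoded = CFMaskCoder().decode(var, name='t')  # -999 is replaced by NaN, dtype is promoted to float
#
# Similarly, UnsignedIntegerCoder reinterprets signed storage: an int8 value of -1
# stored with _Unsigned='true' decodes to the uint8 value 255.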
|
shoyer/xray
|
xarray/coding/variables.py
|
Python
|
apache-2.0
| 11,417
|
[
"NetCDF"
] |
0c3d9398b21b0d4b842780ab10796add146cc1e165d2a75d7282f5d9ae08cd45
|
""" CSAPI exposes update functionalities to the Configuration.
Most of these functions can only be done by administrators
"""
__RCSID__ = "$Id$"
import types
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.Utilities import List
from DIRAC.Core.Security.X509Chain import X509Chain
from DIRAC.Core.Security import Locations
from DIRAC.ConfigurationSystem.private.Modificator import Modificator
from DIRAC.ConfigurationSystem.Client.Helpers import CSGlobals
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
class CSAPI( object ):
""" CSAPI objects need an initialization phase
"""
def __init__( self ):
"""
Initialization function
"""
self.__csModified = False
self.__baseSecurity = "/Registry"
self.__baseResources = '/Resources_new'
self.__userDN = ''
self.__userGroup = ''
self.__rpcClient = None
self.__csMod = None
self.__initialized = S_ERROR( "Not initialized" )
self.initialize()
if not self.__initialized[ 'OK' ]:
gLogger.error( self.__initialized )
def __getProxyID( self ):
proxyLocation = Locations.getProxyLocation()
if not proxyLocation:
gLogger.error( "No proxy found!" )
return False
chain = X509Chain()
if not chain.loadProxyFromFile( proxyLocation ):
gLogger.error( "Can't read proxy!", proxyLocation )
return False
retVal = chain.getIssuerCert()
if not retVal[ 'OK' ]:
gLogger.error( "Can't parse proxy!", retVal[ 'Message' ] )
return False
idCert = retVal[ 'Value' ]
self.__userDN = idCert.getSubjectDN()[ 'Value' ]
self.__userGroup = chain.getDIRACGroup()[ 'Value' ]
return True
def __getCertificateID( self ):
certLocation = Locations.getHostCertificateAndKeyLocation()
if not certLocation:
gLogger.error( "No certificate found!" )
return False
chain = X509Chain()
retVal = chain.loadChainFromFile( certLocation[ 0 ] )
if not retVal[ 'OK' ]:
gLogger.error( "Can't parse certificate!", retVal[ 'Message' ] )
return False
idCert = chain.getIssuerCert()[ 'Value' ]
self.__userDN = idCert.getSubjectDN()[ 'Value' ]
self.__userGroup = 'host'
return True
def initialize( self ):
if self.__initialized[ 'OK' ]:
return self.__initialized
if not gConfig.useServerCertificate():
res = self.__getProxyID()
else:
res = self.__getCertificateID()
if not res:
self.__initialized = S_ERROR( "Cannot locate client credentials" )
return self.__initialized
retVal = gConfig.getOption( "/DIRAC/Configuration/MasterServer" )
if not retVal[ 'OK' ]:
self.__initialized = S_ERROR( "Master server is not known. Is everything initialized?" )
return self.__initialized
self.__rpcClient = RPCClient( gConfig.getValue( "/DIRAC/Configuration/MasterServer", "" ) )
self.__csMod = Modificator( self.__rpcClient, "%s - %s" % ( self.__userGroup, self.__userDN ) )
retVal = self.downloadCSData()
if not retVal[ 'OK' ]:
self.__initialized = S_ERROR( "Can not download the remote cfg. Is everything initialized?" )
return self.__initialized
self.__initialized = S_OK()
return self.__initialized
def downloadCSData( self ):
if not self.__csMod:
return S_ERROR( "CSAPI not yet initialized" )
result = self.__csMod.loadFromRemote()
if not result[ 'OK' ]:
return result
self.__csModified = False
self.__csMod.updateGConfigurationData()
return S_OK()
def listUsers( self , group = False ):
if not self.__initialized[ 'OK' ]:
return self.__initialized
if not group:
return S_OK( self.__csMod.getSections( "%s/Users" % self.__baseSecurity ) )
else:
users = self.__csMod.getValue( "%s/Groups/%s/Users" % ( self.__baseSecurity, group ) )
if not users:
return S_OK( [] )
else:
return S_OK( List.fromChar( users ) )
def listHosts( self ):
if not self.__initialized[ 'OK' ]:
return self.__initialized
return S_OK( self.__csMod.getSections( "%s/Hosts" % self.__baseSecurity ) )
def describeUsers( self, users = None ):
if users is None: users = []
if not self.__initialized[ 'OK' ]:
return self.__initialized
return S_OK( self.__describeEntity( users ) )
def describeHosts( self, hosts = None ):
if hosts is None: hosts = []
if not self.__initialized[ 'OK' ]:
return self.__initialized
return S_OK( self.__describeEntity( hosts, True ) )
def __describeEntity( self, mask, hosts = False ):
if hosts:
csSection = "%s/Hosts" % self.__baseSecurity
else:
csSection = "%s/Users" % self.__baseSecurity
if mask:
entities = [ entity for entity in self.__csMod.getSections( csSection ) if entity in mask ]
else:
entities = self.__csMod.getSections( csSection )
entitiesDict = {}
for entity in entities:
entitiesDict[ entity ] = {}
for option in self.__csMod.getOptions( "%s/%s" % ( csSection, entity ) ):
entitiesDict[ entity ][ option ] = self.__csMod.getValue( "%s/%s/%s" % ( csSection, entity, option ) )
if not hosts:
groupsDict = self.describeGroups()[ 'Value' ]
entitiesDict[ entity ][ 'Groups' ] = []
for group in groupsDict:
if 'Users' in groupsDict[ group ] and entity in groupsDict[ group ][ 'Users' ]:
entitiesDict[ entity ][ 'Groups' ].append( group )
entitiesDict[ entity ][ 'Groups' ].sort()
return entitiesDict
def listGroups( self ):
"""
List all groups
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
return S_OK( self.__csMod.getSections( "%s/Groups" % self.__baseSecurity ) )
def describeGroups( self, mask = None ):
"""
List all groups that are in the mask (or all if no mask) with their properties
"""
if mask is None: mask = []
if not self.__initialized[ 'OK' ]:
return self.__initialized
groups = [ group for group in self.__csMod.getSections( "%s/Groups" % self.__baseSecurity ) if not mask or ( mask and group in mask ) ]
groupsDict = {}
for group in groups:
groupsDict[ group ] = {}
for option in self.__csMod.getOptions( "%s/Groups/%s" % ( self.__baseSecurity, group ) ):
groupsDict[ group ][ option ] = self.__csMod.getValue( "%s/Groups/%s/%s" % ( self.__baseSecurity, group, option ) )
if option in ( "Users", "Properties" ):
groupsDict[ group ][ option ] = List.fromChar( groupsDict[ group ][ option ] )
return S_OK( groupsDict )
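# Illustrative return shape for describeGroups (group and property names are hypothetical):
#     S_OK( { 'dirac_user': { 'Users': [ 'jdoe', 'asmith' ], 'Properties': [ 'NormalUser' ] } } )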
def deleteUsers( self, users ):
"""
Delete one or more users. Accepts either a single username string or a list of usernames.
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
if type( users ) == types.StringType:
users = [ users ]
usersData = self.describeUsers( users )['Value']
for username in users:
if not username in usersData:
gLogger.warn( "User %s does not exist" )
continue
userGroups = usersData[ username ][ 'Groups' ]
for group in userGroups:
self.__removeUserFromGroup( group, username )
gLogger.info( "Deleted user %s from group %s" % ( username, group ) )
self.__csMod.removeSection( "%s/Users/%s" % ( self.__baseSecurity, username ) )
gLogger.info( "Deleted user %s" % username )
self.__csModified = True
return S_OK( True )
def __removeUserFromGroup( self, group, username ):
"""
Remove user from a group
"""
usersInGroup = self.__csMod.getValue( "%s/Groups/%s/Users" % ( self.__baseSecurity, group ) )
if usersInGroup is not None:
userList = List.fromChar( usersInGroup, "," )
userPos = userList.index( username )
userList.pop( userPos )
self.__csMod.setOptionValue( "%s/Groups/%s/Users" % ( self.__baseSecurity, group ), ",".join( userList ) )
def __addUserToGroup( self, group, username ):
"""
Add user to a group
"""
usersInGroup = self.__csMod.getValue( "%s/Groups/%s/Users" % ( self.__baseSecurity, group ) )
if usersInGroup is not None:
userList = List.fromChar( usersInGroup )
if username not in userList:
userList.append( username )
self.__csMod.setOptionValue( "%s/Groups/%s/Users" % ( self.__baseSecurity, group ), ",".join( userList ) )
else:
gLogger.warn( "User %s is already in group %s" % ( username, group ) )
def addUser( self, username, properties ):
"""
Add a user to the cs
- username
- properties is a dict with keys:
- DN
- Groups
- <extra params>
Returns True/False
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
for prop in ( "DN", "Groups" ):
if prop not in properties:
gLogger.error( "Missing property for user", "%s: %s" % ( prop, username ) )
return S_OK( False )
if username in self.listUsers()['Value']:
gLogger.error( "User is already registered", username )
return S_OK( False )
groups = self.listGroups()['Value']
for userGroup in properties[ 'Groups' ]:
if not userGroup in groups:
gLogger.error( "User group is not a valid group", "%s %s" % ( username, userGroup ) )
return S_OK( False )
self.__csMod.createSection( "%s/Users/%s" % ( self.__baseSecurity, username ) )
for prop in properties:
if prop == "Groups":
continue
self.__csMod.setOptionValue( "%s/Users/%s/%s" % ( self.__baseSecurity, username, prop ), properties[ prop ] )
for userGroup in properties[ 'Groups' ]:
gLogger.info( "Added user %s to group %s" % ( username, userGroup ) )
self.__addUserToGroup( userGroup, username )
gLogger.info( "Registered user %s" % username )
self.__csModified = True
return S_OK( True )
def modifyUser( self, username, properties, createIfNonExistant = False ):
"""
Modify a user
- username
- properties is a dict with keys:
- DN
- Groups
- <extra params>
Returns True/False
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
modifiedUser = False
userData = self.describeUsers( [ username ] )['Value']
if username not in userData:
if createIfNonExistant:
gLogger.info( "Registering user %s" % username )
return self.addUser( username, properties )
gLogger.error( "User is not registered", username )
return S_OK( False )
for prop in properties:
if prop == "Groups":
continue
prevVal = self.__csMod.getValue( "%s/Users/%s/%s" % ( self.__baseSecurity, username, prop ) )
if not prevVal or prevVal != properties[ prop ]:
gLogger.info( "Setting %s property for user %s to %s" % ( prop, username, properties[ prop ] ) )
self.__csMod.setOptionValue( "%s/Users/%s/%s" % ( self.__baseSecurity, username, prop ), properties[ prop ] )
modifiedUser = True
if 'Groups' in properties:
groups = self.listGroups()['Value']
for userGroup in properties[ 'Groups' ]:
if not userGroup in groups:
gLogger.error( "User group is not a valid group", "%s %s" % ( username, userGroup ) )
return S_OK( False )
groupsToBeDeletedFrom = []
groupsToBeAddedTo = []
for prevGroup in userData[ username ][ 'Groups' ]:
if prevGroup not in properties[ 'Groups' ]:
groupsToBeDeletedFrom.append( prevGroup )
modifiedUser = True
for newGroup in properties[ 'Groups' ]:
if newGroup not in userData[ username ][ 'Groups' ]:
groupsToBeAddedTo.append( newGroup )
modifiedUser = True
for group in groupsToBeDeletedFrom:
self.__removeUserFromGroup( group, username )
gLogger.info( "Removed user %s from group %s" % ( username, group ) )
for group in groupsToBeAddedTo:
self.__addUserToGroup( group, username )
gLogger.info( "Added user %s to group %s" % ( username, group ) )
if modifiedUser:
gLogger.info( "Modified user %s" % username )
self.__csModified = True
else:
gLogger.info( "Nothing to modify for user %s" % username )
return S_OK( True )
def addGroup( self, groupname, properties ):
"""
Add a group to the cs
- groupname
- properties is a dict with keys:
- Users
- Properties
- <extra params>
Returns True/False
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
if groupname in self.listGroups()['Value']:
gLogger.error( "Group is already registered", groupname )
return S_OK( False )
self.__csMod.createSection( "%s/Groups/%s" % ( self.__baseSecurity, groupname ) )
for prop in properties:
self.__csMod.setOptionValue( "%s/Groups/%s/%s" % ( self.__baseSecurity, groupname, prop ), properties[ prop ] )
gLogger.info( "Registered group %s" % groupname )
self.__csModified = True
return S_OK( True )
def modifyGroup( self, groupname, properties, createIfNonExistant = False ):
"""
Modify a group
- groupname
- properties is a dict with keys:
- Users
- Properties
- <extra params>
Returns True/False
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
modifiedGroup = False
groupData = self.describeGroups( [ groupname ] )['Value']
if groupname not in groupData:
if createIfNonExistant:
gLogger.info( "Registering group %s" % groupname )
return self.addGroup( groupname, properties )
gLogger.error( "Group is not registered", groupname )
return S_OK( False )
for prop in properties:
prevVal = self.__csMod.getValue( "%s/Groups/%s/%s" % ( self.__baseSecurity, groupname, prop ) )
if not prevVal or prevVal != properties[ prop ]:
gLogger.info( "Setting %s property for group %s to %s" % ( prop, groupname, properties[ prop ] ) )
self.__csMod.setOptionValue( "%s/Groups/%s/%s" % ( self.__baseSecurity, groupname, prop ), properties[ prop ] )
modifiedGroup = True
if modifiedGroup:
gLogger.info( "Modified group %s" % groupname )
self.__csModified = True
else:
gLogger.info( "Nothing to modify for group %s" % groupname )
return S_OK( True )
def addHost( self, hostname, properties ):
"""
Add a host to the cs
- hostname
- properties is a dict with keys:
- DN
- Properties
- <extra params>
Returns True/False
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
for prop in ( "DN", ):
if prop not in properties:
gLogger.error( "Missing property for host", "%s %s" % ( prop, hostname ) )
return S_OK( False )
if hostname in self.listHosts()['Value']:
gLogger.error( "Host is already registered", hostname )
return S_OK( False )
self.__csMod.createSection( "%s/Hosts/%s" % ( self.__baseSecurity, hostname ) )
for prop in properties:
self.__csMod.setOptionValue( "%s/Hosts/%s/%s" % ( self.__baseSecurity, hostname, prop ), properties[ prop ] )
gLogger.info( "Registered host %s" % hostname )
self.__csModified = True
return S_OK( True )
def addShifter( self, shifters = None ):
"""
Adds or modifies one or more shifters. Also adds the shifter section in case it is not present.
Shifter identities are used in several places, mostly for running agents
shifters should be in the form {'ShifterRole':{'User':'aUserName', 'Group':'aDIRACGroup'}}
:return: S_OK/S_ERROR
"""
def getOpsSection( ):
"""
Where is the shifters section?
"""
vo = CSGlobals.getVO( )
setup = CSGlobals.getSetup( )
if vo:
res = gConfig.getSections( '/Operations/%s/%s/Shifter' % (vo, setup) )
if res['OK']:
return '/Operations/%s/%s/Shifter' % (vo, setup)
res = gConfig.getSections( '/Operations/%s/Defaults/Shifter' % vo )
if res['OK']:
return '/Operations/%s/Defaults/Shifter' % vo
else:
res = gConfig.getSections( '/Operations/%s/Shifter' % setup )
if res['OK']:
return '/Operations/%s/Shifter' % setup
res = gConfig.getSections( '/Operations/Defaults/Shifter' )
if res['OK']:
return '/Operations/Defaults/Shifter'
raise RuntimeError( "No Shifter section found in the Operations configuration" )
if shifters is None: shifters = {}
if not self.__initialized['OK']:
return self.__initialized
# get current shifters
opsH = Operations( )
currentShifterRoles = opsH.getSections( 'Shifter' )
if not currentShifterRoles['OK']:
# we assume the shifter section is not present
currentShifterRoles = []
else:
currentShifterRoles = currentShifterRoles['Value']
currentShiftersDict = {}
for currentShifterRole in currentShifterRoles:
currentShifter = opsH.getOptionsDict( 'Shifter/%s' % currentShifterRole )
if not currentShifter['OK']:
return currentShifter
currentShifter = currentShifter['Value']
currentShiftersDict[currentShifterRole] = currentShifter
# Removing from shifters what does not need to be changed
for sRole in shifters:
if sRole in currentShiftersDict:
if currentShiftersDict[sRole] == shifters[sRole]:
shifters.pop( sRole )
#shifters section to modify
section = getOpsSection( )
#add or modify shifters
for shifter in shifters:
self.__csMod.removeSection( section + '/' + shifter )
self.__csMod.createSection( section + '/' + shifter )
self.__csMod.createSection( section + '/' + shifter + '/' + 'User' )
self.__csMod.createSection( section + '/' + shifter + '/' + 'Group' )
self.__csMod.setOptionValue( section + '/' + shifter + '/' + 'User', shifters[shifter]['User'] )
self.__csMod.setOptionValue( section + '/' + shifter + '/' + 'Group', shifters[shifter]['Group'] )
self.__csModified = True
return S_OK( True )
def modifyHost( self, hostname, properties, createIfNonExistant = False ):
"""
Modify a host
- hostname
- properties is a dict with keys:
- DN
- Properties
- <extra params>
Returns True/False
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
modifiedHost = False
hostData = self.describeHosts( [ hostname ] )['Value']
if hostname not in hostData:
if createIfNonExistant:
gLogger.info( "Registering host %s" % hostname )
return self.addHost( hostname, properties )
gLogger.error( "Host is not registered", hostname )
return S_OK( False )
for prop in properties:
prevVal = self.__csMod.getValue( "%s/Hosts/%s/%s" % ( self.__baseSecurity, hostname, prop ) )
if not prevVal or prevVal != properties[ prop ]:
gLogger.info( "Setting %s property for host %s to %s" % ( prop, hostname, properties[ prop ] ) )
self.__csMod.setOptionValue( "%s/Hosts/%s/%s" % ( self.__baseSecurity, hostname, prop ), properties[ prop ] )
modifiedHost = True
if modifiedHost:
gLogger.info( "Modified host %s" % hostname )
self.__csModified = True
else:
gLogger.info( "Nothing to modify for host %s" % hostname )
return S_OK( True )
def syncUsersWithCFG( self, usersCFG ):
"""
Sync users with the cfg contents. Usernames have to be sections containing
DN, Groups, and extra properties as parameters
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
done = True
for user in usersCFG.listSections():
properties = {}
propList = usersCFG[ user ].listOptions()
for prop in propList:
if prop == "Groups":
properties[ prop ] = List.fromChar( usersCFG[ user ][ prop ] )
else:
properties[ prop ] = usersCFG[ user ][ prop ]
if not self.modifyUser( user, properties, createIfNonExistant = True ):
done = False
return S_OK( done )
def sortUsersAndGroups( self ):
self.__csMod.sortAlphabetically( "%s/Users" % self.__baseSecurity )
self.__csMod.sortAlphabetically( "%s/Hosts" % self.__baseSecurity )
for group in self.__csMod.getSections( "%s/Groups" % self.__baseSecurity ):
usersOptionPath = "%s/Groups/%s/Users" % ( self.__baseSecurity, group )
users = self.__csMod.getValue( usersOptionPath )
usersList = List.fromChar( users )
usersList.sort()
sortedUsers = ", ".join( usersList )
if users != sortedUsers:
self.__csMod.setOptionValue( usersOptionPath, sortedUsers )
def checkForUnexistantUsersInGroups( self ):
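# Remove from each group's Users option any username that is no longer defined under Registry/Users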
allUsers = self.__csMod.getSections( "%s/Users" % self.__baseSecurity )
allGroups = self.__csMod.getSections( "%s/Groups" % self.__baseSecurity )
for group in allGroups:
usersInGroup = self.__csMod.getValue( "%s/Groups/%s/Users" % ( self.__baseSecurity, group ) )
if usersInGroup:
filteredUsers = []
usersInGroup = List.fromChar( usersInGroup )
for user in usersInGroup:
if user in allUsers:
filteredUsers.append( user )
self.__csMod.setOptionValue( "%s/Groups/%s/Users" % ( self.__baseSecurity, group ),
",".join( filteredUsers ) )
def __addResourceLikeSection( self, resourcePath, resourceDict ):
""" Add one of Resource level entries ( site, resource, access point )
"""
self.__csMod.createSection( resourcePath )
for property in resourceDict:
value = resourceDict[property]
if type( value ) in types.StringTypes:
self.__csMod.setOptionValue( "%s/%s" % ( resourcePath, property ), value )
elif type( value ) == types.ListType:
optValue = ','.join(value)
self.__csMod.setOptionValue( "%s/%s" % ( resourcePath, property ), optValue )
elif type( value ) == types.DictType:
self.__csMod.createSection( "%s/%s" % ( resourcePath, property ) )
for option in value:
newValue = value[option]
if type( newValue ) in types.StringTypes:
self.__csMod.setOptionValue( "%s/%s/%s" % ( resourcePath, property, option ), newValue )
elif type( newValue ) == types.ListType:
optValue = ','.join( newValue)
self.__csMod.setOptionValue( "%s/%s/%s" % ( resourcePath, property, option ), optValue )
self.__csModified = True
return S_OK( True )
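# Illustrative resourceDict (keys and values are hypothetical) for the method above:
# string values become options, lists are joined with ',', and nested dicts become sub-sections, e.g.
#     { 'Name': 'Example', 'CE': [ 'ce01.example.org', 'ce02.example.org' ],
#       'Coordinates': { 'Latitude': '46.2', 'Longitude': '6.1' } }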
def addSite( self, siteName, siteDict ):
""" Add a new Site to the CS
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
sitePath = "%s/Sites/%s" % ( self.__baseResources, siteName )
if self.__csMod.existsSection( sitePath ):
return S_ERROR( 'Site %s already exists ' % siteName )
return self.__addResourceLikeSection( sitePath, siteDict )
def addResource( self, siteName, resourceType, resourceName, resourceDict ):
""" Add a new Resource to the CS
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
sitePath = "%s/Sites/%s" % ( self.__baseResources, siteName )
if not self.__csMod.existsSection( sitePath ):
return S_ERROR( 'Site %s does not exist' % siteName )
resourcePath = "%s/Sites/%s/%s/%s" % ( self.__baseResources, siteName, resourceType, resourceName )
if self.__csMod.existsSection( resourcePath ):
return S_ERROR( '%s resource %s at site %s already exists' % ( resourceType, resourceName, siteName ) )
return self.__addResourceLikeSection( resourcePath, resourceDict )
def addNode( self, siteName, resourceType, resourceName, apType, apName, apDict ):
""" Add a new site to the CS
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
sitePath = "%s/Sites/%s" % ( self.__baseResources, siteName )
if not self.__csMod.existsSection( sitePath ):
return S_ERROR( 'Site %s does not exist' % siteName )
resourcePath = "%s/Sites/%s/%s/%s" % ( self.__baseResources, siteName, resourceType, resourceName )
if not self.__csMod.existsSection( resourcePath ):
return S_ERROR( '%s resource %s at site %s does not exist' % ( resourceType, resourceName, siteName ) )
apPath = "%s/Sites/%s/%s/%s/%s/%s" % ( self.__baseResources, siteName, resourceType, resourceName, apType, apName )
if self.__csMod.existsSection( apPath ):
return S_ERROR( '%s access point %s at %s resource %s at site %s already exists ' % \
( apType, apName, resourceType, resourceName, siteName ) )
return self.__addResourceLikeSection( apPath, apDict )
def sortSection( self, section ):
self.__csMod.sortAlphabetically( section )
def commitChanges( self, sortUsers = True ):
if not self.__initialized[ 'OK' ]:
return self.__initialized
if self.__csModified:
self.checkForUnexistantUsersInGroups()
if sortUsers:
self.sortUsersAndGroups()
retVal = self.__csMod.commit()
if not retVal[ 'OK' ]:
gLogger.error( "Can't commit new configuration data", "%s" % retVal[ 'Message' ] )
return retVal
return self.downloadCSData()
return S_OK()
def commit( self ):
""" Commit the accumulated changes to the CS server
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
if self.__csModified:
retVal = self.__csMod.commit()
if not retVal[ 'OK' ]:
gLogger.error( "Can't commit new configuration data", "%s" % retVal[ 'Message' ] )
return retVal
return self.downloadCSData()
return S_OK()
def mergeFromCFG( self, cfg ):
""" Merge the internal CFG data with the input
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
self.__csMod.mergeFromCFG( cfg )
self.__csModified = True
return S_OK()
def modifyValue( self, optionPath, newValue ):
"""Modify an existing value at the specified options path.
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
prevVal = self.__csMod.getValue( optionPath )
if not prevVal:
return S_ERROR( 'Trying to set %s to %s but option does not exist' % ( optionPath, newValue ) )
gLogger.verbose( "Changing %s from \n%s \nto \n%s" % ( optionPath, prevVal, newValue ) )
self.__csMod.setOptionValue( optionPath, newValue )
self.__csModified = True
return S_OK( 'Modified %s' % optionPath )
def setOption( self, optionPath, optionValue ):
"""Create an option at the specified path.
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
self.__csMod.setOptionValue( optionPath, optionValue )
self.__csModified = True
return S_OK( 'Created new option %s = %s' % ( optionPath, optionValue ) )
def setOptionComment( self, optionPath, comment ):
"""Create an option at the specified path.
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
self.__csMod.setComment( optionPath, comment )
self.__csModified = True
return S_OK( 'Set option comment %s : %s' % ( optionPath, comment ) )
def deleteOption( self, optionPath ):
""" Delete an option
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
if not self.__csMod.removeOption( optionPath ):
return S_ERROR( "Couldn't delete option %s" % optionPath )
self.__csModified = True
return S_OK( 'Deleted option %s' % optionPath )
def createSection( self, sectionPath, comment = "" ):
""" Create a new section
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
self.__csMod.createSection( sectionPath )
self.__csModified = True
if comment:
self.__csMod.setComment( sectionPath, comment )
return S_OK()
def deleteSection( self, sectionPath ):
""" Delete a section
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
if not self.__csMod.removeSection( sectionPath ):
return S_ERROR( "Could not delete section %s " % sectionPath )
self.__csModified = True
return S_OK( )
def copySection( self, originalPath, targetPath ):
""" Copy a whole section to a new location
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
cfg = self.__csMod.getCFG()
sectionCfg = cfg[originalPath]
result = self.createSection( targetPath )
if not result[ 'OK' ]:
return result
if not self.__csMod.mergeSectionFromCFG( targetPath, sectionCfg ):
return S_ERROR( "Could not merge cfg into section %s" % targetPath )
self.__csModified = True
return S_OK()
def moveSection( self, originalPath, targetPath ):
""" Move a whole section to a new location
"""
result = self.copySection( originalPath, targetPath )
if not result['OK']:
return result
result = self.deleteSection( originalPath )
if not result[ 'OK' ]:
return result
self.__csModified = True
return S_OK()
def mergeCFGUnderSection( self, sectionPath, cfg ):
""" Merge the given cfg under a certain section
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
result = self.createSection( sectionPath )
if not result[ 'OK' ]:
return result
if not self.__csMod.mergeSectionFromCFG( sectionPath, cfg ):
return S_ERROR( "Could not merge cfg into section %s" % sectionPath )
self.__csModified = True
return S_OK()
def mergeWithCFG( self, cfg ):
""" Merge the given cfg with the current config
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
self.__csMod.mergeFromCFG( cfg )
self.__csModified = True
return S_OK()
def getCurrentCFG( self ):
""" Get the current CFG as it is
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
return S_OK( self.__csMod.getCFG() )
|
coberger/DIRAC
|
ConfigurationSystem/Client/CSAPI.py
|
Python
|
gpl-3.0
| 30,978
|
[
"DIRAC"
] |
b0200d4185a07f85f65c193fd27193ba789f42f0d0fded07ecf7dc17a67a491d
|
''' -- imports from python libraries -- '''
# from datetime import datetime
import datetime
import json
''' -- imports from installed packages -- '''
from django.http import HttpResponseRedirect # , HttpResponse uncomment when to use
from django.http import HttpResponse
from django.http import Http404
from django.shortcuts import render_to_response # , render uncomment when to use
from django.template import RequestContext
from django.template import TemplateDoesNotExist
from django.template.loader import render_to_string
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from mongokit import IS
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
''' -- imports from application folders/files -- '''
from gnowsys_ndf.settings import GAPPS, MEDIA_ROOT, GSTUDIO_TASK_TYPES
from gnowsys_ndf.ndf.models import NodeJSONEncoder
from gnowsys_ndf.settings import GSTUDIO_SITE_NAME
from gnowsys_ndf.ndf.models import Node, AttributeType, RelationType
from gnowsys_ndf.ndf.models import node_collection, triple_collection
from gnowsys_ndf.ndf.views.file import save_file
from gnowsys_ndf.ndf.templatetags.ndf_tags import edit_drawer_widget
from gnowsys_ndf.ndf.views.methods import get_node_common_fields, parse_template_data, get_execution_time, delete_node
from gnowsys_ndf.ndf.views.notify import set_notif_val
from gnowsys_ndf.ndf.views.methods import get_property_order_with_value
from gnowsys_ndf.ndf.views.methods import create_gattribute, create_grelation, create_task
from gnowsys_ndf.notification import models as notification
GST_COURSE = node_collection.one({'_type': "GSystemType", 'name': "Course"})
GST_ACOURSE = node_collection.one({'_type': "GSystemType", 'name': "Announced Course"})
app = GST_COURSE
@get_execution_time
def course(request, group_id, course_id=None):
"""
* Renders a list of all 'courses' available within the database.
"""
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False:
group_ins = node_collection.find_one({'_type': "Group", "name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
app_id = None
app_id = app._id
course_coll = None
all_course_coll = None
ann_course_coll = None
enrolled_course_coll = []
course_enrollment_status = None
app_set_id = None
if course_id is None:
course_ins = node_collection.find_one({'_type': "GSystemType", "name": "Course"})
if course_ins:
course_id = str(course_ins._id)
app_set = node_collection.one({'_type': "GSystemType", 'name': "Announced Course"})
app_set_id = app_set._id
# Course search view
title = GST_COURSE.name
if request.user.id:
course_coll = node_collection.find({'member_of': GST_COURSE._id,'group_set': ObjectId(group_id),'status':u"DRAFT"})
all_course_coll = node_collection.find({'member_of': {'$in': [GST_COURSE._id,GST_ACOURSE._id]},
'group_set': ObjectId(group_id),'status':{'$in':[u"PUBLISHED",u"DRAFT"]}})
auth_node = node_collection.one({'_type': "Author", 'created_by': int(request.user.id)})
'''
if auth_node.attribute_set:
for each in auth_node.attribute_set:
if each and "course_enrollment_status" in each:
course_enrollment_dict = each["course_enrollment_status"]
course_enrollment_status = [ObjectId(each) for each in course_enrollment_dict]
enrolled_course_coll = node_collection.find({'_id': {'$in': course_enrollment_status}})
'''
ann_course_coll = node_collection.find({'member_of': GST_ACOURSE._id, 'group_set': ObjectId(group_id),'status':u"PUBLISHED"})
return render_to_response("ndf/course.html",
{'title': title,
'app_id': app_id, 'course_gst': GST_COURSE,
'app_set_id': app_set_id,
'searching': True, 'course_coll': course_coll,
'groupid': group_id, 'group_id': group_id,
'all_course_coll': all_course_coll,
'enrolled_course_coll': enrolled_course_coll,
'ann_course_coll': ann_course_coll
},
context_instance=RequestContext(request)
)
@login_required
@get_execution_time
def create_edit(request, group_id, node_id=None):
"""Creates/Modifies details about the given quiz-item.
"""
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False:
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
at_course_type = node_collection.one({'_type': 'AttributeType', 'name': 'nussd_course_type'})
context_variables = {'title': GST_COURSE.name,
'group_id': group_id,
'groupid': group_id
}
if node_id:
course_node = node_collection.one({'_type': u'GSystem', '_id': ObjectId(node_id)})
else:
course_node = node_collection.collection.GSystem()
available_nodes = node_collection.find({'_type': u'GSystem', 'member_of': ObjectId(GST_COURSE._id),'group_set': ObjectId(group_id),'status':{"$in":[u"DRAFT",u"PUBLISHED"]}})
nodes_list = []
for each in available_nodes:
nodes_list.append(str((each.name).strip().lower()))
if request.method == "POST":
# get_node_common_fields(request, course_node, group_id, GST_COURSE)
course_node.save(is_changed=get_node_common_fields(request, course_node, group_id, GST_COURSE))
create_gattribute(course_node._id, at_course_type, u"General")
return HttpResponseRedirect(reverse('course', kwargs={'group_id': group_id}))
else:
if node_id:
context_variables['node'] = course_node
context_variables['groupid'] = group_id
context_variables['group_id'] = group_id
context_variables['app_id'] = app._id
context_variables['nodes_list'] = json.dumps(nodes_list)
return render_to_response("ndf/course_create_edit.html",
context_variables,
context_instance=RequestContext(request)
)
# @login_required
@get_execution_time
def course_detail(request, group_id, _id):
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False:
group_ins = node_collection.find_one({'_type': "Group", "name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
course_structure_exists = False
enrolled_status = False
check_enroll_status = False
title = GST_COURSE.name
course_node = node_collection.one({"_id": ObjectId(_id)})
if course_node.collection_set:
course_structure_exists = True
gs_name = course_node.member_of_names_list[0]
context_variables = {'groupid': group_id,
'group_id': group_id,
'app_id': app._id,
'title': title,
'node': course_node,
'node_type': gs_name
}
if gs_name == "Course":
context_variables["course_structure_exists"] = course_structure_exists
if course_node.relation_set:
for rel in course_node.relation_set:
if "announced_as" in rel:
cnode = node_collection.one({'_id': ObjectId(rel["announced_as"][0])},{'_id':1})
context_variables["acnode"] = str(cnode['_id'])
check_enroll_status = True
break
else:
if course_node.relation_set:
for rel in course_node.relation_set:
if "announced_for" in rel:
cnode = node_collection.one({'_id': ObjectId(rel["announced_for"][0])})
context_variables["cnode"] = cnode
check_enroll_status = True
break
if request.user.id:
if check_enroll_status:
usr_id = int(request.user.id)
auth_node = node_collection.one({'_type': "Author", 'created_by': usr_id})
course_enrollment_status = {}
if auth_node.attribute_set:
for each in auth_node.attribute_set:
if each and "course_enrollment_status" in each:
course_enrollment_status = each["course_enrollment_status"]
if "acnode" in context_variables:
str_course_id = str(context_variables["acnode"])
else:
str_course_id = str(course_node._id)
if course_enrollment_status:
if str_course_id in course_enrollment_status:
enrolled_status = True
context_variables['enrolled_status'] = enrolled_status
return render_to_response("ndf/course_detail.html",
context_variables,
context_instance=RequestContext(request)
)
@login_required
@get_execution_time
def course_create_edit(request, group_id, app_id, app_set_id=None, app_set_instance_id=None, app_name=None):
"""
Creates/Modifies document of given sub-types of Course(s).
"""
auth = None
tiss_site = False
if ObjectId.is_valid(group_id) is False:
group_ins = node_collection.one({'_type': "Group", "name": group_id})
auth = node_collection.one({
'_type': 'Author', 'name': unicode(request.user.username)
})
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({
'_type': 'Author', 'name': unicode(request.user.username)
})
if auth:
group_id = str(auth._id)
else:
pass
if GSTUDIO_SITE_NAME == "TISS":
tiss_site = True
app = None
if app_id is None:
app = node_collection.one({'_type': "GSystemType", 'name': app_name})
if app:
app_id = str(app._id)
else:
app = node_collection.one({'_id': ObjectId(app_id)})
# app_set = ""
app_collection_set = []
title = ""
course_gst = None
course_gs = None
hide_mis_meta_content = True
mis_admin = None
property_order_list = []
template = ""
template_prefix = "mis"
if request.user:
if auth is None:
auth = node_collection.one({
'_type': 'Author', 'name': unicode(request.user.username)
})
agency_type = auth.agency_type
agency_type_node = node_collection.one({
'_type': "GSystemType", 'name': agency_type
}, {
'collection_set': 1
})
if agency_type_node:
for eachset in agency_type_node.collection_set:
app_collection_set.append(
node_collection.one({
"_id": eachset
}, {
'_id': 1, 'name': 1, 'type_of': 1
})
)
if app_set_id:
course_gst = node_collection.one({
'_type': "GSystemType", '_id': ObjectId(app_set_id)
}, {
'name': 1, 'type_of': 1
})
template = "ndf/" + course_gst.name.strip().lower().replace(' ', '_') \
+ "_create_edit.html"
title = course_gst.name
if app_set_instance_id:
course_gs = node_collection.one({
'_type': "GSystem", '_id': ObjectId(app_set_instance_id)
})
else:
course_gs = node_collection.collection.GSystem()
course_gs.member_of.append(course_gst._id)
property_order_list = get_property_order_with_value(course_gs)
if request.method == "POST":
# [A] Save course-node's base-field(s)
start_time = ""
if "start_time" in request.POST:
start_time = request.POST.get("start_time", "")
start_time = datetime.datetime.strptime(start_time, "%m/%Y")
end_time = ""
if "end_time" in request.POST:
end_time = request.POST.get("end_time", "")
end_time = datetime.datetime.strptime(end_time, "%m/%Y")
nussd_course_type = ""
if "nussd_course_type" in request.POST:
nussd_course_type = request.POST.get("nussd_course_type", "")
nussd_course_type = unicode(nussd_course_type)
unset_ac_options = []
if "unset-ac-options" in request.POST:
unset_ac_options = request.POST.getlist("unset-ac-options")
else:
# Just to execute loop at least once for Course Sub-Types
# other than 'Announced Course'
unset_ac_options = ["dummy"]
if course_gst.name == u"Announced Course":
announce_to_colg_list = request.POST.get(
"announce_to_colg_list", ""
)
announce_to_colg_list = [ObjectId(colg_id) for colg_id in announce_to_colg_list.split(",")]
colg_ids = []
# Parsing ObjectId -- from string format to ObjectId
for each in announce_to_colg_list:
if each and ObjectId.is_valid(each):
colg_ids.append(ObjectId(each))
# Fetching college(s)
colg_list_cur = node_collection.find({
'_id': {'$in': colg_ids}
}, {
'name': 1, 'attribute_set.enrollment_code': 1
})
if "_id" in course_gs:
# It means we are in editing mode of given Announced Course GSystem
unset_ac_options = [course_gs._id]
ac_nc_code_list = []
# Prepare a list
# 0th index (ac_node): Announced Course node,
# 1st index (nc_id): NUSSD Course node's ObjectId,
# 2nd index (nc_course_code): NUSSD Course's code
for cid in unset_ac_options:
ac_node = None
nc_id = None
nc_course_code = ""
# Here course_gst is Announced Course GSytemType's node
ac_node = node_collection.one({
'_id': ObjectId(cid), 'member_of': course_gst._id
})
# If ac_node found, means
# (1) we are dealing with creating Announced Course
# else,
# (2) we are in editing phase of Announced Course
course_node = None
if not ac_node:
# In this case, cid is of NUSSD Course GSystem
# So fetch that to extract course_code
# Set to nc_id
ac_node = None
course_node = node_collection.one({
'_id': ObjectId(cid)
})
else:
# In this case, fetch NUSSD Course from
# Announced Course GSystem's announced_for relationship
for rel in ac_node.relation_set:
if "announced_for" in rel:
course_node_ids = rel["announced_for"]
break
# Fetch NUSSD Course GSystem
if course_node_ids:
course_node = node_collection.find_one({
"_id": {"$in": course_node_ids}
})
# If course_code doesn't exists then
# set NUSSD Course GSystem's name as course_code
if course_node:
nc_id = course_node._id
for attr in course_node.attribute_set:
if "course_code" in attr:
nc_course_code = attr["course_code"]
break
if not nc_course_code:
nc_course_code = course_node.name.replace(" ", "-")
# Append to ac_nc_code_list
ac_nc_code_list.append([ac_node, nc_id, nc_course_code])
# For each selected college
# Create Announced Course GSystem
for college_node in colg_list_cur:
# Fetch Enrollment code from "enrollment_code" (Attribute)
college_enrollment_code = ""
if college_node:
for attr in college_node.attribute_set:
if attr and "enrollment_code" in attr:
college_enrollment_code = attr["enrollment_code"]
break
ann_course_id_list = []
# For each selected course to Announce
for ac_nc_code in ac_nc_code_list:
course_gs = ac_nc_code[0]
nc_id = ac_nc_code[1]
cnode_for_content = node_collection.one({'_id': ObjectId(nc_id)})
nc_course_code = ac_nc_code[2]
if not course_gs:
# Create new Announced Course GSystem
course_gs = node_collection.collection.GSystem()
course_gs.member_of.append(course_gst._id)
if tiss_site:
# Prepare name for Announced Course GSystem
c_name = unicode(
nc_course_code + " - " + college_enrollment_code + " - "
+ start_time.strftime("%b %Y") + " - "
+ end_time.strftime("%b %Y")
)
else:
# Prepare name for Announced Course GSystem
c_name = unicode(
nc_course_code + " - "+ start_time.strftime("%b %Y") + " - "
+ end_time.strftime("%b %Y")
)
request.POST["name"] = c_name
is_changed = get_node_common_fields(
request, course_gs, group_id, course_gst
)
if is_changed:
# Remove this when publish button is setup on interface
course_gs.status = u"PUBLISHED"
course_gs.content_org = cnode_for_content.content_org
course_gs.content = cnode_for_content.html_content
course_gs.save(is_changed=is_changed)
# [B] Store AT and/or RT field(s) of given course-node
for tab_details in property_order_list:
for field_set in tab_details[1]:
# Fetch only Attribute field(s) / Relation field(s)
if '_id' in field_set:
field_instance = node_collection.one({
'_id': field_set['_id']
})
field_instance_type = type(field_instance)
if (field_instance_type in
[AttributeType, RelationType]):
field_data_type = field_set['data_type']
# Fetch field's value depending upon AT/RT
# and Parse fetched-value depending upon
# that field's data-type
if field_instance_type == AttributeType:
if "File" in field_instance["validators"]:
# Special case: AttributeTypes that require a file instance as their value, in which case the file document's ObjectId is used
if field_instance["name"] in request.FILES:
field_value = request.FILES[field_instance["name"]]
else:
field_value = ""
# The 0th index is used below because save_file() returns a tuple (ObjectId, bool)
if field_value != '' and field_value != u'':
file_name = course_gs.name + " -- " + field_instance["altnames"]
content_org = ""
tags = ""
field_value = save_file(field_value, file_name, request.user.id, group_id, content_org, tags, oid=True)[0]
else:
# Other AttributeTypes
field_value = request.POST.get(field_instance["name"], "")
if field_instance["name"] in ["start_time", "end_time"]:
# Course Duration
field_value = parse_template_data(field_data_type, field_value, date_format_string="%m/%Y")
else:
field_value = parse_template_data(field_data_type, field_value, date_format_string="%d/%m/%Y %H:%M")
course_gs_triple_instance = create_gattribute(course_gs._id, node_collection.collection.AttributeType(field_instance), field_value)
else:
# i.e if field_instance_type == RelationType
if field_instance["name"] == "announced_for":
field_value = ObjectId(nc_id)
# Pass ObjectId of selected Course
elif field_instance["name"] == "acourse_for_college":
field_value = college_node._id
# Pass ObjectId of selected College
course_gs_triple_instance = create_grelation(course_gs._id, node_collection.collection.RelationType(field_instance), field_value)
ann_course_id_list.append(course_gs._id)
# Commented out: email notifications to all registered users after announcement
# if not tiss_site:
# site = Site.objects.get(pk=1)
# site = site.name.__str__()
# ann_course_url_link = "http://" + site + "/home/course/course_detail/" + \
# str(course_gs._id)
# user_obj = User.objects.all()
# # Sending email to all registered users on site NROER
# render_label = render_to_string(
# "notification/label.html",
# {"sender": "NROER eCourses",
# "activity": "Course Announcement",
# "conjunction": "-"
# })
# if user_obj:
# notification.create_notice_type(render_label," New eCourse '"\
# + str(course_gs.name) +"' has been announced."\
# +" Visit this link to enroll into this ecourse : " \
# + ann_course_url_link, "notification")
# notification.send(user_obj, render_label, {"from_user": "NROER eCourses"})
else:
is_changed = get_node_common_fields(request, course_gs, group_id, course_gst)
if is_changed:
# Remove this when publish button is setup on interface
course_gs.status = u"PUBLISHED"
course_gs.save(is_changed=is_changed)
# [B] Store AT and/or RT field(s) of given course-node
for tab_details in property_order_list:
for field_set in tab_details[1]:
# Fetch only Attribute field(s) / Relation field(s)
if '_id' in field_set:
field_instance = node_collection.one({'_id': field_set['_id']})
field_instance_type = type(field_instance)
if field_instance_type in [AttributeType, RelationType]:
field_data_type = field_set['data_type']
# Fetch field's value depending upon AT/RT
# and Parse fetched-value depending upon
# that field's data-type
if field_instance_type == AttributeType:
if "File" in field_instance["validators"]:
# Special case: AttributeTypes that require a file instance as their value, in which case the file document's ObjectId is used
if field_instance["name"] in request.FILES:
field_value = request.FILES[field_instance["name"]]
else:
field_value = ""
# The 0th index is used below because save_file() returns a tuple (ObjectId, bool)
if field_value != '' and field_value != u'':
file_name = course_gs.name + " -- " + field_instance["altnames"]
content_org = ""
tags = ""
field_value = save_file(field_value, file_name, request.user.id, group_id, content_org, tags, oid=True)[0]
else:
# Other AttributeTypes
field_value = request.POST.get(field_instance["name"], "")
# if field_instance["name"] in ["start_time","end_time"]:
# field_value = parse_template_data(field_data_type, field_value, date_format_string="%m/%Y")
# elif field_instance["name"] in ["start_enroll", "end_enroll"]: #Student Enrollment DUration
# field_value = parse_template_data(field_data_type, field_value, date_format_string="%d/%m/%Y")
if field_instance["name"] in ["mast_tr_qualifications", "voln_tr_qualifications"]:
# Needs a special kind of parsing
field_value = []
tr_qualifications = request.POST.get(field_instance["name"], '')
if tr_qualifications:
qualifications_dict = {}
tr_qualifications = [qual.strip() for qual in tr_qualifications.split(",")]
for i, qual in enumerate(tr_qualifications):
if (i % 2) == 0:
if qual == "true":
qualifications_dict["mandatory"] = True
elif qual == "false":
qualifications_dict["mandatory"] = False
else:
qualifications_dict["text"] = unicode(qual)
field_value.append(qualifications_dict)
qualifications_dict = {}
elif field_instance["name"] in ["max_marks", "min_marks"]:
# Needed because both these fields' values are dependent upon evaluation_type field's value
evaluation_type = request.POST.get("evaluation_type", "")
if evaluation_type == u"Continuous":
field_value = None
field_value = parse_template_data(field_data_type, field_value, date_format_string="%d/%m/%Y %H:%M")
else:
field_value = parse_template_data(field_data_type, field_value, date_format_string="%d/%m/%Y %H:%M")
course_gs_triple_instance = create_gattribute(
course_gs._id,
node_collection.collection.AttributeType(field_instance),
field_value
)
else:
#i.e if field_instance_type == RelationType
if field_instance["name"] == "announced_for":
field_value = ObjectId(cid)
#Pass ObjectId of selected Course
elif field_instance["name"] == "acourse_for_college":
field_value = college_node._id
#Pass ObjectId of selected College
course_gs_triple_instance = create_grelation(
course_gs._id,
node_collection.collection.RelationType(field_instance),
field_value
)
if tiss_site:
return HttpResponseRedirect(
reverse(
app_name.lower() + ":" + template_prefix + '_app_detail',
kwargs={
'group_id': group_id, "app_id": app_id,
"app_set_id": app_set_id
}
)
)
else:
return HttpResponseRedirect(
reverse(
"course",
kwargs={
'group_id': group_id
}
)
)
univ = node_collection.one({
'_type': "GSystemType", 'name': "University"
}, {
'_id': 1
})
university_cur = None
if not mis_admin:
mis_admin = node_collection.one(
{'_type': "Group", 'name': "MIS_admin"},
{'_id': 1, 'name': 1, 'group_admin': 1}
)
if tiss_site:
hide_mis_meta_content = False
if univ and mis_admin:
university_cur = node_collection.find(
{'member_of': univ._id, 'group_set': mis_admin._id},
{'name': 1}
).sort('name', 1)
default_template = "ndf/course_create_edit.html"
context_variables = {
'groupid': group_id, 'group_id': group_id,
'app_id': app_id, 'app_name': app_name,
'app_collection_set': app_collection_set,
'app_set_id': app_set_id,
'title': title,
'hide_mis_meta_content':hide_mis_meta_content,
'tiss_site': tiss_site,
'university_cur': university_cur,
'property_order_list': property_order_list
}
if app_set_instance_id:
course_gs.get_neighbourhood(course_gs.member_of)
context_variables['node'] = course_gs
if "Announced Course" in course_gs.member_of_names_list:
for attr in course_gs.attribute_set:
if attr:
for eachk, eachv in attr.items():
context_variables[eachk] = eachv
for rel in course_gs.relation_set:
if rel:
for eachk, eachv in rel.items():
if eachv:
get_node_name = node_collection.one({'_id': eachv[0]})
context_variables[eachk] = get_node_name.name
try:
return render_to_response(
[template, default_template],
context_variables, context_instance=RequestContext(request)
)
except TemplateDoesNotExist as tde:
error_message = "\n CourseCreateEditViewError: This html template (" \
+ str(tde) + ") does not exists !!!\n"
raise Http404(error_message)
except Exception as e:
error_message = "\n CourseCreateEditViewError: " + str(e) + " !!!\n"
raise Exception(error_message)
@login_required
@get_execution_time
def mis_course_detail(request, group_id, app_id=None, app_set_id=None, app_set_instance_id=None, app_name=None):
"""
Detail view of NUSSD Course/ Announced Course
"""
# print "\n Found course_detail n gone inn this...\n\n"
auth = None
if ObjectId.is_valid(group_id) is False:
group_ins = node_collection.one({'_type': "Group", "name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
app = None
if app_id is None:
app = node_collection.one({'_type': "GSystemType", 'name': app_name})
if app:
app_id = str(app._id)
else:
app = node_collection.one({'_id': ObjectId(app_id)})
app_name = app.name
# app_name = "mis"
app_set = ""
app_collection_set = []
title = ""
course_gst = None
course_gs = None
node = None
property_order_list = []
property_order_list_ac = []
is_link_needed = True  # Required to show the Link button on the interface that links a Student's/VoluntaryTeacher's node with its corresponding Author node
template_prefix = "mis"
response_dict = {'success': False}
context_variables = {}
#Course structure collection _dict
course_collection_dict = {}
course_collection_list = []
course_structure_exists = False
if request.user:
if auth is None:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username)})
if auth:
agency_type = auth.agency_type
agency_type_node = node_collection.one({'_type': "GSystemType", 'name': agency_type}, {'collection_set': 1})
if agency_type_node:
for eachset in agency_type_node.collection_set:
app_collection_set.append(node_collection.one({"_id": eachset}, {'_id': 1, 'name': 1, 'type_of': 1}))
if app_set_id:
course_gst = node_collection.one({'_type': "GSystemType", '_id': ObjectId(app_set_id)}, {'name': 1, 'type_of': 1})
title = course_gst.name
template = "ndf/course_list.html"
query = {}
college = {}
course = {}
ac_data_set = []
records_list = []
if course_gst.name == "Announced Course":
query = {
"member_of": course_gst._id,
"group_set": ObjectId(group_id),
"status": "PUBLISHED",
"attribute_set.ann_course_closure": u"Open",
}
res = node_collection.collection.aggregate([
{
'$match': query
}, {
'$project': {
'_id': 0,
'ac_id': "$_id",
'name': '$name',
'course': '$relation_set.announced_for',
'college': '$relation_set.acourse_for_college',
'nussd_course_type': '$attribute_set.nussd_course_type',
'created_at': "$created_at"
}
},
{
'$sort': {'created_at': 1}
}
])
records_list = res["result"]
if records_list:
for each in res["result"]:
if each["college"]:
colg_id = each["college"][0][0]
if colg_id not in college:
c = node_collection.one({"_id": colg_id}, {"name": 1, "relation_set.college_affiliated_to": 1})
each["college"] = c.name
each["college_id"] = c._id
college[colg_id] = {}
college[colg_id]["name"] = each["college"]
for rel in c.relation_set:
if rel and "college_affiliated_to" in rel:
univ_id = rel["college_affiliated_to"][0]
u = node_collection.one({"_id": univ_id}, {"name": 1})
each.update({"university": u.name})
college[colg_id]["university"] = each["university"]
college[colg_id]["university_id"] = u._id
each["university_id"] = u._id
else:
each["college"] = college[colg_id]["name"]
each["college_id"] = colg_id
each.update({"university": college[colg_id]["university"]})
each.update({"university_id": college[colg_id]["university_id"]})
if each["course"]:
course_id = each["course"][0][0]
if course_id not in course:
each["course"] = node_collection.one({"_id": course_id}).name
course[course_id] = each["course"]
else:
each["course"] = course[course_id]
ac_data_set.append(each)
column_headers = [
("name", "Announced Course Name"),
("course", "Course Name"),
("nussd_course_type", "Course Type"),
("college", "College"),
("university", "University")
]
else:
query = {
"member_of": course_gst._id,
"group_set": ObjectId(group_id),
}
res = node_collection.collection.aggregate([
{
'$match': query
}, {
'$project': {
'_id': 0,
'ac_id': "$_id",
'name': '$name',
'nussd_course_type': '$attribute_set.nussd_course_type',
'created_at': "$created_at"
}
},
{
'$sort': {'created_at': 1}
}
])
records_list = res["result"]
if records_list:
for each in res["result"]:
ac_data_set.append(each)
column_headers = [
("ac_id", "Edit"),
("name", "Course Name"),
("nussd_course_type", "Course Type"),
]
response_dict["column_headers"] = column_headers
response_dict["success"] = True
response_dict["students_data_set"] = ac_data_set
response_dict["groupid"] = group_id
response_dict["app_id"] = app_id
response_dict["app_set_id"] = app_set_id
if app_set_instance_id:
template = "ndf/course_details.html"
node = node_collection.one({'_type': "GSystem", '_id': ObjectId(app_set_instance_id)})
property_order_list = get_property_order_with_value(node)
node.get_neighbourhood(node.member_of)
if title == u"Announced Course":
property_order_list_ac = node.attribute_set
# Course structure as list of dicts
if node.collection_set:
course_structure_exists = True
context_variables = { 'groupid': group_id, 'group_id': group_id,
'app_id': app_id, 'app_name': app_name, 'app_collection_set': app_collection_set,
'app_set_id': app_set_id,
'course_gst_name': course_gst.name,
'title': title,
'course_structure_exists': course_structure_exists,
'node': node,
'property_order_list': property_order_list,
'property_order_list_ac': property_order_list_ac,
'is_link_needed': is_link_needed,
'response_dict':json.dumps(response_dict, cls=NodeJSONEncoder)
}
try:
# print "\n template-list: ", [template, default_template]
# template = "ndf/fgh.html"
# default_template = "ndf/dsfjhk.html"
# return render_to_response([template, default_template],
return render_to_response(template,
context_variables,
context_instance = RequestContext(request)
)
except TemplateDoesNotExist as tde:
error_message = "\n CourseDetailListViewError: This html template (" + str(tde) + ") does not exists !!!\n"
raise Http404(error_message)
except Exception as e:
error_message = "\n CourseDetailListViewError: " + str(e) + " !!!\n"
raise Exception(error_message)
# Ajax views for setting up Course Structure
@login_required
@get_execution_time
def create_course_struct(request, group_id, node_id):
"""
This view is to create the structure of the Course.
A Course holds CourseSection, which further holds CourseSubSection
in their respective collection_set.
A tree depiction to this is as follows:
Course Name:
1. CourseSection1
1.1. CourseSubSection1
1.2. CourseSubSection2
2. CourseSection2
2.1. CourseSubSection3
"""
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False:
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
app_id = None
app_set_id = None
tiss_site = False
property_order_list_cs = []
property_order_list_css = []
course_structure_exists = False
title = "Course Authoring"
if GSTUDIO_SITE_NAME == "TISS":
tiss_site = True
course_node = node_collection.one({"_id": ObjectId(node_id)})
cs_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseSection"})
cs_gs = node_collection.collection.GSystem()
cs_gs.member_of.append(cs_gst._id)
property_order_list_cs = get_property_order_with_value(cs_gs)
css_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseSubSection"})
css_gs = node_collection.collection.GSystem()
css_gs.member_of.append(css_gst._id)
property_order_list_css = get_property_order_with_value(css_gs)
course_collection_list = course_node.collection_set
if course_collection_list:
course_structure_exists = True
# for attr in course_node.attribute_set:
# if attr.has_key("evaluation_type"):
# eval_type = attr["evaluation_type"]
#If evaluation_type flag is True, it is Final. If False, it is Continuous
# if(eval_type==u"Final"):
# eval_type_flag = True
# else:
# eval_type_flag = False
if request.method == "GET":
app_id = request.GET.get("app_id", "")
app_set_id = request.GET.get("app_set_id", "")
return render_to_response("ndf/create_course_structure.html",
{'cnode': course_node,
'groupid': group_id,
'group_id': group_id,
'title': title,
'tiss_site':tiss_site,
'app_id': app_id, 'app_set_id': app_set_id,
'property_order_list': property_order_list_cs,
'property_order_list_css': property_order_list_css
},
context_instance=RequestContext(request)
)
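# --- Illustrative sketch (added for clarity; not part of the original view code) ---
# The docstring of create_course_struct above describes the tree that the course
# structure forms through collection_set links: Course -> CourseSection ->
# CourseSubSection. A self-contained picture of that shape on plain dicts, using
# hypothetical sample names:
def _sketch_course_tree():
    course = {
        "name": "Sample Course",
        "collection_set": [
            {"name": "CourseSection1", "collection_set": [
                {"name": "CourseSubSection1", "collection_set": []},
                {"name": "CourseSubSection2", "collection_set": []}]},
            {"name": "CourseSection2", "collection_set": [
                {"name": "CourseSubSection3", "collection_set": []}]},
        ],
    }
    def walk(node, depth=0):
        # print one line per node, indented by its depth in the tree
        print("    " * depth + node["name"])
        for child in node["collection_set"]:
            walk(child, depth + 1)
    walk(course)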
@login_required
def save_course_section(request, group_id):
'''
Accepts:
* NUSSD Course/Course node _id
* CourseSection name
Actions:
* Creates CourseSection GSystem with name received.
* Appends this new CourseSection node id into
NUSSD Course/Course collection_set
Returns:
* success (i.e True/False)
* ObjectId of CourseSection node
'''
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
cs_node_name = request.POST.get("cs_name", '')
course_node_id = request.POST.get("course_node_id", '')
cs_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseSection"})
cs_new = node_collection.collection.GSystem()
cs_new.member_of.append(cs_gst._id)
cs_new.name = cs_node_name
cs_new.modified_by = int(request.user.id)
cs_new.created_by = int(request.user.id)
cs_new.contributors.append(int(request.user.id))
course_node = node_collection.one({"_id": ObjectId(course_node_id)})
cs_new.prior_node.append(ObjectId(course_node._id))
cs_new.save()
node_collection.collection.update({'_id': course_node._id}, {'$push': {'collection_set': cs_new._id }}, upsert=False, multi=False)
response_dict["success"] = True
response_dict["cs_new_id"] = str(cs_new._id)
return HttpResponse(json.dumps(response_dict))
@login_required
def save_course_sub_section(request, group_id):
'''
Accepts:
* CourseSection node _id
* CourseSubSection name
Actions:
* Creates CourseSubSection GSystem with name received.
* Appends this new CourseSubSection node id into
CourseSection collection_set
Returns:
* success (i.e True/False)
* ObjectId of CourseSubSection node
'''
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
css_node_name = request.POST.get("css_name", '')
cs_node_id = request.POST.get("cs_node_id", '')
css_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseSubSection"})
css_new = node_collection.collection.GSystem()
css_new.member_of.append(css_gst._id)
# set name
css_new.name = css_node_name
css_new.modified_by = int(request.user.id)
css_new.created_by = int(request.user.id)
css_new.contributors.append(int(request.user.id))
cs_node = node_collection.one({"_id": ObjectId(cs_node_id)})
css_new.prior_node.append(cs_node._id)
css_new.save()
node_collection.collection.update({'_id': cs_node._id}, {'$push': {'collection_set': css_new._id }}, upsert=False, multi=False)
response_dict["success"] = True
response_dict["css_new_id"] = str(css_new._id)
return HttpResponse(json.dumps(response_dict))
@login_required
def change_node_name(request, group_id):
'''
Accepts:
* CourseSection/ CourseSubSection node _id
* New name for CourseSection node
Actions:
* Updates received node's name
'''
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
node_id = request.POST.get("node_id", '')
new_name = request.POST.get("new_name", '')
node = node_collection.one({"_id": ObjectId(node_id)})
node.name = new_name.strip()
node.save()
response_dict["success"] = True
return HttpResponse(json.dumps(response_dict))
@login_required
def change_order(request, group_id):
'''
Accepts:
* 2 node ids.
Basically, either of CourseSection or CourseSubSection
* Parent node id
Either a NUSSD Course/Course or CourseSection
Actions:
* Swaps the 2 node ids in the collection set of received
parent node
'''
response_dict = {"success": False}
collection_set_list = []
if request.is_ajax() and request.method == "POST":
node_id_up = request.POST.get("node_id_up", '')
node_id_down = request.POST.get("node_id_down", '')
parent_node_id = request.POST.get("parent_node", '')
parent_node = node_collection.one({"_id": ObjectId(parent_node_id)})
collection_set_list = parent_node.collection_set
a, b = collection_set_list.index(ObjectId(node_id_up)), collection_set_list.index(ObjectId(node_id_down))
collection_set_list[b], collection_set_list[a] = collection_set_list[a], collection_set_list[b]
node_collection.collection.update({'_id': parent_node._id}, {'$set': {'collection_set': collection_set_list }}, upsert=False, multi=False)
parent_node.reload()
response_dict["success"] = True
return HttpResponse(json.dumps(response_dict))
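# --- Illustrative sketch (added for clarity; not part of the original view code) ---
# change_order above swaps two ObjectIds inside the parent's collection_set and
# writes the list back. The same index-swap idiom on a plain list, assuming both
# items are present:
def _sketch_swap_order(collection_set, node_id_up, node_id_down):
    a, b = collection_set.index(node_id_up), collection_set.index(node_id_down)
    collection_set[b], collection_set[a] = collection_set[a], collection_set[b]
    return collection_set
# e.g. _sketch_swap_order(["cs1", "cs2", "cs3"], "cs1", "cs3") -> ["cs3", "cs2", "cs1"]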
@login_required
def course_sub_section_prop(request, group_id):
'''
Accepts:
* CourseSubSection node _id
* Properties dict
Actions:
* Creates GAttributes with the values of received dict
for the respective CourseSubSection node
Returns:
* success (i.e True/False)
* If request.method is POST, all GAttributes in a dict structure,
'''
response_dict = {"success": False}
if request.is_ajax():
if request.method == "POST":
assessment_flag = False
css_node_id = request.POST.get("css_node_id", '')
prop_dict = request.POST.get("prop_dict", '')
assessment_chk = json.loads(request.POST.get("assessment_chk", ''))
prop_dict = json.loads(prop_dict)
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
at_cs_hours = node_collection.one({'_type': 'AttributeType', 'name': 'course_structure_minutes'})
at_cs_assessment = node_collection.one({'_type': 'AttributeType', 'name': 'course_structure_assessment'})
at_cs_assignment = node_collection.one({'_type': 'AttributeType', 'name': 'course_structure_assignment'})
at_cs_min_marks = node_collection.one({'_type': 'AttributeType', 'name': 'min_marks'})
at_cs_max_marks = node_collection.one({'_type': 'AttributeType', 'name': 'max_marks'})
if assessment_chk is True:
create_gattribute(css_node._id, at_cs_assessment, True)
assessment_flag = True
for propk, propv in prop_dict.items():
# add attributes to css gs
if(propk == "course_structure_minutes"):
create_gattribute(css_node._id, at_cs_hours, int(propv))
elif(propk == "course_structure_assignment"):
create_gattribute(css_node._id, at_cs_assignment, propv)
if assessment_flag:
if(propk == "min_marks"):
create_gattribute(css_node._id, at_cs_min_marks, int(propv))
if(propk == "max_marks"):
create_gattribute(css_node._id, at_cs_max_marks, int(propv))
css_node.reload()
response_dict["success"] = True
else:
css_node_id = request.GET.get("css_node_id", '')
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
if css_node.attribute_set:
for each in css_node.attribute_set:
for k, v in each.items():
response_dict[k] = v
response_dict["success"] = True
else:
response_dict["success"] = False
return HttpResponse(json.dumps(response_dict))
@login_required
def add_units(request, group_id):
'''
Accepts:
* CourseSubSection node _id
* NUSSD Course/Course node _id
Actions:
* Renders course_units.html
'''
variable = None
unit_node = None
css_node_id = request.GET.get('css_node_id', '')
unit_node_id = request.GET.get('unit_node_id', '')
course_node_id = request.GET.get('course_node', '')
app_id = request.GET.get('app_id', '')
app_set_id = request.GET.get('app_set_id', '')
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
course_node = node_collection.one({"_id": ObjectId(course_node_id)})
title = "Course Units"
try:
unit_node = node_collection.one({"_id": ObjectId(unit_node_id)})
except:
unit_node = None
variable = RequestContext(request, {
'group_id': group_id, 'groupid': group_id,
'css_node': css_node,
'title': title,
'app_set_id': app_set_id,
'app_id': app_id,
'unit_node': unit_node,
'course_node': course_node,
})
template = "ndf/course_units.html"
return render_to_response(template, variable)
@login_required
def get_resources(request, group_id):
'''
Accepts:
* Name of GSystemType (Page, File, etc.)
* CourseSubSection node _id
* widget_for
Actions:
* Fetches all GSystems of selected GSystemType as resources
Returns:
* Returns Drawer with resources
'''
response_dict = {'success': False, 'message': ""}
try:
if request.is_ajax() and request.method == "POST":
css_node_id = request.POST.get('css_node_id', "")
unit_node_id = request.POST.get('unit_node_id', "")
widget_for = request.POST.get('widget_for', "")
resource_type = request.POST.get('resource_type', "")
resource_type = resource_type.strip()
list_resources = []
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
try:
unit_node = node_collection.one({"_id": ObjectId(unit_node_id)})
except:
unit_node = None
if resource_type:
if resource_type == "Pandora":
resource_type = "Pandora_video"
resource_gst = node_collection.one({'_type': "GSystemType", 'name': resource_type})
res = node_collection.find(
{
'member_of': resource_gst._id,
'group_set': ObjectId(group_id),
'status': u"PUBLISHED"
}
)
for each in res:
list_resources.append(each)
drawer_template_context = edit_drawer_widget("CourseUnits", group_id, unit_node, None, checked="collection_set", left_drawer_content=list_resources)
drawer_template_context["widget_for"] = widget_for
drawer_widget = render_to_string(
'ndf/drawer_widget.html',
drawer_template_context,
context_instance=RequestContext(request)
)
return HttpResponse(drawer_widget)
else:
error_message = "Resource Drawer: Either not an ajax call or not a POST request!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except Exception as e:
error_message = "Resource Drawer: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
@login_required
def save_resources(request, group_id):
'''
Accepts:
* List of resources (i.e GSystem of Page, File, etc.)
* CourseSubSection node _id
Actions:
* Sets the received resources in respective node's collection_set
'''
response_dict = {"success": False,"create_new_unit": True}
if request.is_ajax() and request.method == "POST":
list_of_res = json.loads(request.POST.get('list_of_res', ""))
css_node_id = request.POST.get('css_node', "")
unit_name = request.POST.get('unit_name', "")
unit_name = unit_name.strip()
unit_node_id = request.POST.get('unit_node_id', "")
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
list_of_res_ids = [ObjectId(each_res) for each_res in list_of_res]
try:
cu_new = node_collection.one({'_id': ObjectId(unit_node_id)})
except:
cu_new = None
if not cu_new:
cu_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseUnit"})
cu_new = node_collection.collection.GSystem()
cu_new.member_of.append(cu_gst._id)
# set name
cu_new.name = unit_name.strip()
cu_new.modified_by = int(request.user.id)
cu_new.created_by = int(request.user.id)
cu_new.contributors.append(int(request.user.id))
cu_new.prior_node.append(css_node._id)
cu_new.save()
response_dict["create_new_unit"] = True
node_collection.collection.update({'_id': cu_new._id}, {'$set': {'name': unit_name }}, upsert=False, multi=False)
if cu_new._id not in css_node.collection_set:
node_collection.collection.update({'_id': css_node._id}, {'$push': {'collection_set': cu_new._id }}, upsert=False, multi=False)
node_collection.collection.update({'_id': cu_new._id}, {'$set': {'collection_set':list_of_res_ids}},upsert=False,multi=False)
cu_new.reload()
response_dict["success"] = True
response_dict["cu_new_id"] = str(cu_new._id)
return HttpResponse(json.dumps(response_dict))
@login_required
def create_edit_unit(request, group_id):
'''
Accepts:
* ObjectId of unit node if exists
* ObjectId of CourseSubSection node
Actions:
* Creates/Updates Unit node
Returns:
* success (i.e True/False)
'''
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
css_node_id = request.POST.get("css_node_id", '')
unit_node_id = request.POST.get("unit_node_id", '')
unit_name = request.POST.get("unit_name", '')
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
try:
cu_node = node_collection.one({'_id': ObjectId(unit_node_id)})
except:
cu_node = None
if cu_node is None:
cu_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseUnit"})
cu_node = node_collection.collection.GSystem()
cu_node.member_of.append(cu_gst._id)
# set name
cu_node.name = unit_name.strip()
cu_node.modified_by = int(request.user.id)
cu_node.created_by = int(request.user.id)
cu_node.contributors.append(int(request.user.id))
cu_node.prior_node.append(css_node._id)
cu_node.save()
response_dict["unit_node_id"] = str(cu_node._id)
node_collection.collection.update({'_id': cu_node._id}, {'$set': {'name': unit_name}}, upsert=False, multi=False)
if cu_node._id not in css_node.collection_set:
node_collection.collection.update({'_id': css_node._id}, {'$push': {'collection_set': cu_node._id}}, upsert=False, multi=False)
return HttpResponse(json.dumps(response_dict))
@login_required
def delete_course(request, group_id, node_id):
del_stat = delete_item(node_id)
if del_stat:
return HttpResponseRedirect(reverse('course', kwargs={'group_id': ObjectId(group_id)}))
@login_required
def delete_from_course_structure(request, group_id):
'''
Accepts:
* ObjectId of node that is to be deleted.
It can be CourseSection/CourseSubSection/CourseUnit
Actions:
* Deletes the received node
Returns:
* success (i.e True/False)
'''
response_dict = {"success": False}
del_stat = False
if request.is_ajax() and request.method == "POST":
oid = request.POST.get("oid", '')
del_stat = delete_item(oid)
if del_stat:
response_dict["success"] = True
return HttpResponse(json.dumps(response_dict))
def delete_item(item):
node_item = node_collection.one({'_id': ObjectId(item)})
if u"CourseUnit" not in node_item.member_of_names_list and node_item.collection_set:
for each in node_item.collection_set:
d_st = delete_item(each)
del_status, del_status_msg = delete_node(
node_id=node_item._id,
deletion_type=0
)
return del_status
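# --- Illustrative sketch (added for clarity; not part of the original view code) ---
# delete_item above recurses into a node's collection_set (unless the node is a
# CourseUnit) and removes children before the node itself. The same post-order
# traversal on nested dicts, with hypothetical field names:
def _sketch_delete_recursive(node, deleted=None):
    deleted = deleted if deleted is not None else []
    if node.get("type") != "CourseUnit":
        for child in node.get("collection_set", []):
            _sketch_delete_recursive(child, deleted)
    deleted.append(node["name"])  # children are removed before their parent
    return deleted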
@login_required
def enroll_generic(request, group_id):
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
course_enrollment_status_at = node_collection.one({
"_type": "AttributeType", "name": "course_enrollment_status"
})
node_id = request.POST.get('node_id', '')
usr_id = request.POST.get('usr_id', '')
usr_id = int(usr_id)
auth_node = node_collection.one({'_type': "Author", 'created_by': usr_id})
course_node = node_collection.one({'_id': ObjectId(node_id)})
course_enrollment_status = {}
if auth_node.attribute_set:
for each in auth_node.attribute_set:
if each and "course_enrollment_status" in each:
course_enrollment_status = each["course_enrollment_status"]
str_course_id = str(course_node._id)
if course_enrollment_status is not None:
if str_course_id not in course_enrollment_status:
course_enrollment_status.update({str_course_id: u"Approved"})
at_node = create_gattribute(auth_node["_id"], course_enrollment_status_at, course_enrollment_status)
response_dict['success'] = True
return HttpResponse(json.dumps(response_dict))
else:
return HttpResponse(json.dumps(response_dict))
@login_required
def remove_resource_from_unit(request, group_id):
'''
Accepts:
* ObjectId of node to be removed from collection_set.
* ObjectId of unit_node.
Actions:
* Removes res_id from unit_node's collection_set
Returns:
* success (i.e True/False)
'''
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
unit_node_id = request.POST.get("unit_node_id", '')
res_id = request.POST.get("res_id", '')
unit_node = node_collection.one({'_id': ObjectId(unit_node_id)})
if unit_node.collection_set and res_id:
node_collection.collection.update({'_id': unit_node._id}, {'$pull': {'collection_set': ObjectId(res_id)}}, upsert=False, multi=False)
response_dict["success"] = True
return HttpResponse(json.dumps(response_dict))
| olympian94/gstudio | gnowsys-ndf/gnowsys_ndf/ndf/views/course.py | Python | agpl-3.0 | 64,077 | ["VisIt"] | 4d040cd96c4bbff17a34a2f5ddb48cb105e468f196787073e9678026bb15f6ef |
# SimpleCV Cameras & Devices
#load system libraries
from SimpleCV.base import *
from SimpleCV.ImageClass import Image, ImageSet, ColorSpace
from SimpleCV.Display import Display
from SimpleCV.Color import Color
from collections import deque
import time
import ctypes as ct
import subprocess
import cv2
import numpy as np
import traceback
import sys
#Globals
_cameras = []
_camera_polling_thread = ""
_index = []
class FrameBufferThread(threading.Thread):
"""
**SUMMARY**
This is a helper thread which continually debuffers the camera frames. If
you don't do this, cameras may constantly give you a frame behind, which
causes problems at low sample rates. This makes sure the frames returned
by your camera are fresh.
"""
def run(self):
global _cameras
while (1):
for cam in _cameras:
if cam.pygame_camera:
cam.pygame_buffer = cam.capture.get_image(cam.pygame_buffer)
else:
cv.GrabFrame(cam.capture)
cam._threadcapturetime = time.time()
time.sleep(0.04) #max 25 fps, if you're lucky
class FrameSource:
"""
**SUMMARY**
An abstract Camera-type class, for handling multiple types of video input.
Any source of images inherits from it.
"""
_calibMat = "" #Intrinsic calibration matrix
_distCoeff = "" #Distortion matrix
_threadcapturetime = '' #when the last picture was taken
capturetime = '' #timestamp of the last acquired image
def __init__(self):
return
def getProperty(self, p):
return None
def getAllProperties(self):
return {}
def getImage(self):
return None
def calibrate(self, imageList, grid_sz=0.03, dimensions=(8, 5)):
"""
**SUMMARY**
Camera calibration will help remove distortion and fisheye effects
It is agnostic of the imagery source, and can be used with any camera
The easiest way to run calibration is to run the
calibrate.py file under the tools directory for SimpleCV.
This will walk you through the calibration process.
**PARAMETERS**
* *imageList* - is a list of images of color calibration images.
* *grid_sz* - the actual grid size of the calibration grid; the unit used here becomes
the calibration unit (i.e. if in doubt use meters, or U.S. standard units)
* *dimensions* - the count of the *interior* corners in the calibration grid.
So a grid of 4x4 black squares (an 8x8 chessboard) has seven interior corners along each side.
**RETURNS**
The camera's intrinsic matrix.
**EXAMPLE**
See :py:module:calibrate.py
"""
# This routine was adapted from code originally written by:
# Abid. K -- abidrahman2@gmail.com
# See: https://github.com/abidrahmank/OpenCV-Python/blob/master/Other_Examples/camera_calibration.py
warn_thresh = 1
n_boards = 0 #no of boards
board_w = int(dimensions[0]) # number of horizontal corners
board_h = int(dimensions[1]) # number of vertical corners
n_boards = int(len(imageList))
board_n = board_w * board_h # no of total corners
board_sz = (board_w, board_h) #size of board
if( n_boards < warn_thresh ):
logger.warning("FrameSource.calibrate: We suggest using 20 or more images to perform camera calibration!" )
# creation of memory storages
image_points = cv.CreateMat(n_boards * board_n, 2, cv.CV_32FC1)
object_points = cv.CreateMat(n_boards * board_n, 3, cv.CV_32FC1)
point_counts = cv.CreateMat(n_boards, 1, cv.CV_32SC1)
intrinsic_matrix = cv.CreateMat(3, 3, cv.CV_32FC1)
distortion_coefficient = cv.CreateMat(5, 1, cv.CV_32FC1)
# capture frames of specified properties and modification of matrix values
i = 0
z = 0 # to print number of frames
successes = 0
imgIdx = 0
# capturing required number of views
while(successes < n_boards):
found = 0
img = imageList[imgIdx]
(found, corners) = cv.FindChessboardCorners(img.getGrayscaleMatrix(), board_sz,
cv.CV_CALIB_CB_ADAPTIVE_THRESH |
cv.CV_CALIB_CB_FILTER_QUADS)
corners = cv.FindCornerSubPix(img.getGrayscaleMatrix(), corners,(11, 11),(-1, -1),
(cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))
# if got a good image,draw chess board
if found == 1:
corner_count = len(corners)
z = z + 1
# if got a good image, add to matrix
if len(corners) == board_n:
step = successes * board_n
k = step
for j in range(board_n):
cv.Set2D(image_points, k, 0, corners[j][0])
cv.Set2D(image_points, k, 1, corners[j][1])
cv.Set2D(object_points, k, 0, grid_sz*(float(j)/float(board_w)))
cv.Set2D(object_points, k, 1, grid_sz*(float(j)%float(board_w)))
cv.Set2D(object_points, k, 2, 0.0)
k = k + 1
cv.Set2D(point_counts, successes, 0, board_n)
successes = successes + 1
# now assigning new matrices according to view_count
if( successes < warn_thresh ):
logger.warning("FrameSource.calibrate: You have %s good images for calibration we recommend at least %s" % (successes, warn_thresh))
object_points2 = cv.CreateMat(successes * board_n, 3, cv.CV_32FC1)
image_points2 = cv.CreateMat(successes * board_n, 2, cv.CV_32FC1)
point_counts2 = cv.CreateMat(successes, 1, cv.CV_32SC1)
for i in range(successes * board_n):
cv.Set2D(image_points2, i, 0, cv.Get2D(image_points, i, 0))
cv.Set2D(image_points2, i, 1, cv.Get2D(image_points, i, 1))
cv.Set2D(object_points2, i, 0, cv.Get2D(object_points, i, 0))
cv.Set2D(object_points2, i, 1, cv.Get2D(object_points, i, 1))
cv.Set2D(object_points2, i, 2, cv.Get2D(object_points, i, 2))
for i in range(successes):
cv.Set2D(point_counts2, i, 0, cv.Get2D(point_counts, i, 0))
cv.Set2D(intrinsic_matrix, 0, 0, 1.0)
cv.Set2D(intrinsic_matrix, 1, 1, 1.0)
rcv = cv.CreateMat(n_boards, 3, cv.CV_64FC1)
tcv = cv.CreateMat(n_boards, 3, cv.CV_64FC1)
# camera calibration
cv.CalibrateCamera2(object_points2, image_points2, point_counts2,
(img.width, img.height), intrinsic_matrix,distortion_coefficient,
rcv, tcv, 0)
self._calibMat = intrinsic_matrix
self._distCoeff = distortion_coefficient
return intrinsic_matrix
def getCameraMatrix(self):
"""
**SUMMARY**
This function returns a cvMat of the camera's intrinsic matrix.
If there is no matrix defined the function returns None.
"""
return self._calibMat
def undistort(self, image_or_2darray):
"""
**SUMMARY**
If given an image, apply the undistortion given by the camera's matrix and return the result.
If given a 1xN 2D cvmat or a 2xN numpy array, it will un-distort points of
measurement and return them in the original coordinate system.
**PARAMETERS**
* *image_or_2darray* - an image or an ndarray.
**RETURNS**
The undistorted image or the undistorted points. If the camera is un-calibrated
we return None.
**EXAMPLE**
>>> img = cam.getImage()
>>> result = cam.undistort(img)
"""
if(type(self._calibMat) != cv.cvmat or type(self._distCoeff) != cv.cvmat ):
logger.warning("FrameSource.undistort: This operation requires calibration, please load the calibration matrix")
return None
if (type(image_or_2darray) == InstanceType and image_or_2darray.__class__ == Image):
inImg = image_or_2darray # we have an image
retVal = inImg.getEmpty()
cv.Undistort2(inImg.getBitmap(), retVal, self._calibMat, self._distCoeff)
return Image(retVal)
else:
mat = ''
if (type(image_or_2darray) == cv.cvmat):
mat = image_or_2darray
else:
arr = cv.fromarray(np.array(image_or_2darray))
mat = cv.CreateMat(cv.GetSize(arr)[1], 1, cv.CV_64FC2)
cv.Merge(arr[:, 0], arr[:, 1], None, None, mat)
upoints = cv.CreateMat(cv.GetSize(mat)[1], 1, cv.CV_64FC2)
cv.UndistortPoints(mat, upoints, self._calibMat, self._distCoeff)
#undistorted.x = (x* focalX + principalX);
#undistorted.y = (y* focalY + principalY);
return (np.array(upoints[:, 0]) *\
[self.getCameraMatrix()[0, 0], self.getCameraMatrix()[1, 1]] +\
[self.getCameraMatrix()[0, 2], self.getCameraMatrix()[1, 2]])[:, 0]
def getImageUndistort(self):
"""
**SUMMARY**
Using the overridden getImage method we retrieve the image and apply the undistortion
operation.
**RETURNS**
The latest image from the camera after applying undistortion.
**EXAMPLE**
>>> cam = Camera()
>>> cam.loadCalibration("mycam.xml")
>>> while True:
>>> img = cam.getImageUndistort()
>>> img.show()
"""
return self.undistort(self.getImage())
def saveCalibration(self, filename):
"""
**SUMMARY**
Save the calibration matrices to file. The file name should be without the extension.
The default extension is .xml.
**PARAMETERS**
* *filename* - The file name, without an extension, to which to save the calibration data.
**RETURNS**
Returns true if the file was saved , false otherwise.
**EXAMPLE**
See :py:module:calibrate.py
"""
if( type(self._calibMat) != cv.cvmat ):
logger.warning("FrameSource.saveCalibration: No calibration matrix present, can't save.")
else:
intrFName = filename + "Intrinsic.xml"
cv.Save(intrFName, self._calibMat)
if( type(self._distCoeff) != cv.cvmat ):
logger.warning("FrameSource.saveCalibration: No calibration distortion present, can't save.")
else:
distFName = filename + "Distortion.xml"
cv.Save(distFName, self._distCoeff)
return None
def loadCalibration(self, filename):
"""
**SUMMARY**
Load a calibration matrix from file.
The filename should be the stem of the calibration files names.
e.g. If the calibration files are MyWebcamIntrinsic.xml and MyWebcamDistortion.xml
then load the calibration file "MyWebcam"
**PARAMETERS**
* *filename* - The file name, without an extension, to which to save the calibration data.
**RETURNS**
Returns true if the file was loaded , false otherwise.
**EXAMPLE**
See :py:module:calibrate.py
"""
retVal = False
intrFName = filename + "Intrinsic.xml"
self._calibMat = cv.Load(intrFName)
distFName = filename + "Distortion.xml"
self._distCoeff = cv.Load(distFName)
if( type(self._distCoeff) == cv.cvmat
and type(self._calibMat) == cv.cvmat):
retVal = True
return retVal
def live(self):
"""
**SUMMARY**
This shows a live view of the camera.
**EXAMPLE**
Using it is as simple as:
>>> cam = Camera()
>>> cam.live()
Left click will show mouse coordinates and color
Right click will kill the live image
"""
start_time = time.time()
from SimpleCV.Display import Display
i = self.getImage()
d = Display(i.size())
i.save(d)
col = Color.RED
while d.isNotDone():
i = self.getImage()
elapsed_time = time.time() - start_time
if d.mouseLeft:
txt = "coord: (" + str(d.mouseX) + "," + str(d.mouseY) + ")"
i.dl().text(txt, (10,i.height / 2), color=col)
txt = "color: " + str(i.getPixel(d.mouseX,d.mouseY))
i.dl().text(txt, (10,(i.height / 2) + 10), color=col)
print "coord: (" + str(d.mouseX) + "," + str(d.mouseY) + "), color: " + str(i.getPixel(d.mouseX,d.mouseY))
if elapsed_time > 0 and elapsed_time < 5:
i.dl().text("In live mode", (10,10), color=col)
i.dl().text("Left click will show mouse coordinates and color", (10,20), color=col)
i.dl().text("Right click will kill the live image", (10,30), color=col)
i.save(d)
if d.mouseRight:
print "Closing Window"
d.done = True
pg.quit()
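# --- Illustrative sketch (added for clarity; not part of the original library code) ---
# FrameSource.calibrate() above fills object_points with planar chessboard
# coordinates for each detected corner. As an assumption for illustration, the
# standard construction indexes corners row by row (row = j // board_w,
# column = j % board_w) and scales by the physical square size:
def _sketch_chessboard_object_points(board_w, board_h, grid_sz):
    import numpy as np
    pts = np.zeros((board_w * board_h, 3), dtype=np.float32)
    for j in range(board_w * board_h):
        pts[j, 0] = grid_sz * (j // board_w)  # row position on the board
        pts[j, 1] = grid_sz * (j % board_w)   # column position on the board
        pts[j, 2] = 0.0                       # the calibration target is planar
    return pts
# e.g. _sketch_chessboard_object_points(8, 5, 0.03) gives the 40 corner positions
# of an 8x5 interior-corner grid with 3 cm squares.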
class Camera(FrameSource):
"""
**SUMMARY**
The Camera class is the class for managing input from a basic camera. Note
that once the camera is initialized, it will be locked from being used
by other processes. You can check manually if you have compatible devices
on linux by looking for /dev/video* devices.
This class wraps OpenCV's cvCapture class and associated methods.
Read up on OpenCV's CaptureFromCAM method for more details if you need finer
control than just basic frame retrieval
"""
capture = "" #cvCapture object
thread = ""
pygame_camera = False
pygame_buffer = ""
prop_map = {"width": cv.CV_CAP_PROP_FRAME_WIDTH,
"height": cv.CV_CAP_PROP_FRAME_HEIGHT,
"brightness": cv.CV_CAP_PROP_BRIGHTNESS,
"contrast": cv.CV_CAP_PROP_CONTRAST,
"saturation": cv.CV_CAP_PROP_SATURATION,
"hue": cv.CV_CAP_PROP_HUE,
"gain": cv.CV_CAP_PROP_GAIN,
"exposure": cv.CV_CAP_PROP_EXPOSURE}
#human readable to CV constant property mapping
def __init__(self, camera_index = -1, prop_set = {}, threaded = True, calibrationfile = ''):
global _cameras
global _camera_polling_thread
global _index
"""
**SUMMARY**
In the camera constructor, camera_index indicates which camera to connect to
and props is a dictionary which can be used to set any camera attributes
Supported props are currently: height, width, brightness, contrast,
saturation, hue, gain, and exposure.
You can also specify whether you want the FrameBufferThread to continuously
debuffer the camera. If you specify True, the camera is essentially 'on' at
all times. If you specify False, you will have to manage the camera buffers yourself.
**PARAMETERS**
* *camera_index* - The index of the camera, these go from 0 upward, and are system specific.
* *prop_set* - The property set for the camera (i.e. a dict of camera properties).
.. Warning::
For most web cameras only the width and height properties are supported. Support
for all of the other parameters varies by camera and operating system.
* *threaded* - If True we constantly debuffer the camera, otherwise the user
must do this manually.
* *calibrationfile* - A calibration file to load.
"""
self.index = None
self.threaded = False
self.capture = None
if platform.system() == "Linux" and -1 in _index and camera_index != -1 and camera_index not in _index:
process = subprocess.Popen(["lsof /dev/video"+str(camera_index)],shell=True,stdout=subprocess.PIPE)
data = process.communicate()
if data[0]:
camera_index = -1
elif platform.system() == "Linux" and camera_index == -1 and -1 not in _index:
process = subprocess.Popen(["lsof /dev/video*"],shell=True,stdout=subprocess.PIPE)
data = process.communicate()
if data[0]:
camera_index = int(data[0].split("\n")[1].split()[-1][-1])
for cam in _cameras:
if camera_index == cam.index:
self.threaded = cam.threaded
self.capture = cam.capture
self.index = cam.index
_cameras.append(self)
return
#This is to add support for XIMEA cameras.
if isinstance(camera_index, str):
if camera_index.lower() == 'ximea':
camera_index = 1100
_index.append(camera_index)
self.capture = cv.CaptureFromCAM(camera_index) #This fixes bug with opencv not being able to grab frames from webcams on linux
self.index = camera_index
if "delay" in prop_set:
time.sleep(prop_set['delay'])
if platform.system() == "Linux" and (prop_set.has_key("height") or cv.GrabFrame(self.capture) == False):
import pygame.camera
pygame.camera.init()
threaded = True #pygame must be threaded
if camera_index == -1:
camera_index = 0
self.index = camera_index
_index.append(camera_index)
print _index
if(prop_set.has_key("height") and prop_set.has_key("width")):
self.capture = pygame.camera.Camera("/dev/video" + str(camera_index), (prop_set['width'], prop_set['height']))
else:
self.capture = pygame.camera.Camera("/dev/video" + str(camera_index))
try:
self.capture.start()
except Exception as exc:
msg = "caught exception: %r" % exc
logger.warning(msg)
logger.warning("SimpleCV can't seem to find a camera on your system, or the drivers do not work with SimpleCV.")
return
time.sleep(0)
self.pygame_buffer = self.capture.get_image()
self.pygame_camera = True
else:
_index.append(camera_index)
self.threaded = False
if (platform.system() == "Windows"):
threaded = False
if (not self.capture):
return None
#set any properties in the constructor
for p in prop_set.keys():
if p in self.prop_map:
cv.SetCaptureProperty(self.capture, self.prop_map[p], prop_set[p])
if (threaded):
self.threaded = True
_cameras.append(self)
if (not _camera_polling_thread):
_camera_polling_thread = FrameBufferThread()
_camera_polling_thread.daemon = True
_camera_polling_thread.start()
time.sleep(0) #yield to thread
if calibrationfile:
self.loadCalibration(calibrationfile)
#todo -- make these dynamic attributes of the Camera class
def getProperty(self, prop):
"""
**SUMMARY**
Retrieve the value of a given property, wrapper for cv.GetCaptureProperty
.. Warning::
For most web cameras only the width and height properties are supported. Support
for all of the other parameters varies by camera and operating system.
**PARAMETERS**
* *prop* - The property to retrieve.
**RETURNS**
The specified property. If it can't be found the method returns False.
**EXAMPLE**
>>> cam = Camera()
>>> prop = cam.getProperty("width")
"""
if self.pygame_camera:
if prop.lower() == 'width':
return self.capture.get_size()[0]
elif prop.lower() == 'height':
return self.capture.get_size()[1]
else:
return False
if prop in self.prop_map:
return cv.GetCaptureProperty(self.capture, self.prop_map[prop])
return False
def getAllProperties(self):
"""
**SUMMARY**
Return all properties from the camera.
**RETURNS**
A dict of all the camera properties.
"""
if self.pygame_camera:
return False
props = {}
for p in self.prop_map:
props[p] = self.getProperty(p)
return props
def getImage(self):
"""
**SUMMARY**
Retrieve an Image-object from the camera. If you experience problems
with stale frames from the camera's hardware buffer, increase the flushcache
number to dequeue multiple frames before retrieval.
We're working on how to solve this problem.
**RETURNS**
A SimpleCV Image from the camera.
**EXAMPLES**
>>> cam = Camera()
>>> while True:
>>> cam.getImage().show()
"""
if self.pygame_camera:
return Image(self.pygame_buffer.copy())
if (not self.threaded):
cv.GrabFrame(self.capture)
self.capturetime = time.time()
else:
self.capturetime = self._threadcapturetime
frame = cv.RetrieveFrame(self.capture)
newimg = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 3)
cv.Copy(frame, newimg)
return Image(newimg, self)
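# --- Illustrative sketch (added for clarity; not part of the original library code) ---
# Camera and FrameBufferThread above cooperate so that a daemon thread keeps
# grabbing frames and getImage() always retrieves a fresh one. The generic shape
# of that pattern, with a hypothetical grab() callable standing in for
# cv.GrabFrame(capture):
def _sketch_start_debuffer(grab, interval=0.04):
    import threading
    import time
    def _poll():
        while True:
            grab()                # refresh the newest frame in the driver buffer
            time.sleep(interval)  # ~25 fps ceiling, as in FrameBufferThread
    t = threading.Thread(target=_poll)
    t.daemon = True               # don't keep the process alive on exit
    t.start()
    return t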
class VirtualCamera(FrameSource):
"""
**SUMMARY**
The virtual camera lets you test algorithms or functions by providing
a Camera object which is not a physically connected device.
Currently, VirtualCamera supports "image", "imageset" and "video" source types.
**USAGE**
* For image, pass the filename or URL to the image
* For the video, the filename
* For imageset, you can pass either a path or a list of [path, extension]
* For directory, pass a path to a directory and the latest file in it will be returned; for example, where a security camera logs images to a directory, calling .getImage() will get the latest image in that directory
"""
source = ""
sourcetype = ""
lastmtime = 0
def __init__(self, s, st, start=1):
"""
**SUMMARY**
The constructor takes a source, and source type.
**PARAMETERS**
* *s* - the source of the imagery.
* *st* - the type of the virtual camera. Valid strings include:
* *start* - the number of the frame that you want to start with.
* "image" - a single still image.
* "video" - a video file.
* "imageset" - a SimpleCV image set.
* "directory" - a VirtualCamera for loading a directory
**EXAMPLE**
>>> vc = VirtualCamera("img.jpg", "image")
>>> vc = VirtualCamera("video.mpg", "video")
>>> vc = VirtualCamera("./path_to_images/", "imageset")
>>> vc = VirtualCamera("video.mpg", "video", 300)
>>> vc = VirtualCamera("./imgs", "directory")
"""
self.source = s
self.sourcetype = st
self.counter = 0
if start==0:
start=1
self.start = start
if self.sourcetype not in ["video", "image", "imageset", "directory"]:
print 'Error: In VirtualCamera(), Incorrect Source option. "%s" \nUsage:' % self.sourcetype
print '\tVirtualCamera("filename","video")'
print '\tVirtualCamera("filename","image")'
print '\tVirtualCamera("./path_to_images","imageset")'
print '\tVirtualCamera("./path_to_images","directory")'
return None
else:
if isinstance(self.source,str) and not os.path.exists(self.source):
print 'Error: In VirtualCamera()\n\t"%s" was not found.' % self.source
return None
if (self.sourcetype == "imageset"):
if( isinstance(s,ImageSet) ):
self.source = s
elif( isinstance(s,(list,str)) ):
self.source = ImageSet()
if (isinstance(s,list)):
self.source.load(*s)
else:
self.source.load(s)
else:
warnings.warn('Virtual Camera is unable to figure out the contents of your ImageSet, it must be a directory, list of directories, or an ImageSet object')
elif (self.sourcetype == 'video'):
self.capture = cv.CaptureFromFile(self.source)
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, self.start-1)
elif (self.sourcetype == 'directory'):
pass
def getImage(self):
"""
**SUMMARY**
Retrieve an Image-object from the virtual camera.
**RETURNS**
A SimpleCV Image from the camera.
**EXAMPLES**
>>> cam = VirtualCamera()
>>> while True:
>>> cam.getImage().show()
"""
if (self.sourcetype == 'image'):
self.counter = self.counter + 1
return Image(self.source, self)
elif (self.sourcetype == 'imageset'):
print len(self.source)
img = self.source[self.counter % len(self.source)]
self.counter = self.counter + 1
return img
elif (self.sourcetype == 'video'):
# cv.QueryFrame returns None if the video is finished
frame = cv.QueryFrame(self.capture)
if frame:
img = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 3)
cv.Copy(frame, img)
return Image(img, self)
else:
return None
elif (self.sourcetype == 'directory'):
img = self.findLastestImage(self.source, 'bmp')
self.counter = self.counter + 1
return Image(img, self)
def rewind(self, start=None):
"""
**SUMMARY**
Rewind the Video source back to the given frame.
Available for only video sources.
**PARAMETERS**
start - the number of the frame that you want to rewind to.
If not provided, the video source is rewound to the starting
frame number given in the constructor, or to the beginning.
**RETURNS**
None
**EXAMPLES**
>>> cam = VirtualCamera("filename.avi", "video", 120)
>>> i=0
>>> while i<60:
... cam.getImage().show()
... i+=1
>>> cam.rewind()
"""
if (self.sourcetype == 'video'):
if not start:
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, self.start-1)
else:
if start==0:
start=1
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, start-1)
else:
self.counter = 0
def getFrame(self, frame):
"""
**SUMMARY**
Get the provided numbered frame from the video source.
Available for only video sources.
**PARAMETERS**
frame - the number of the frame
**RETURNS**
Image
**EXAMPLES**
>>> cam = VirtualCamera("filename.avi", "video", 120)
>>> cam.getFrame(400).show()
"""
if (self.sourcetype == 'video'):
number_frame = int(cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES))
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, frame-1)
img = self.getImage()
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, number_frame)
return img
elif (self.sourcetype == 'imageset'):
img = None
if( frame < len(self.source)):
img = self.source[frame]
return img
else:
return None
def skipFrames(self, n):
"""
**SUMMARY**
Skip n number of frames.
Available for only video sources.
**PARAMETERS**
n - number of frames to be skipped.
**RETURNS**
None
**EXAMPLES**
>>> cam = VirtualCamera("filename.avi", "video", 120)
>>> i=0
>>> while i<60:
... cam.getImage().show()
... i+=1
>>> cam.skipFrames(100)
>>> cam.getImage().show()
"""
if (self.sourcetype == 'video'):
number_frame = int(cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES))
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, number_frame + n - 1)
elif (self.sourcetype == 'imageset'):
self.counter = (self.counter + n) % len(self.source)
else:
self.counter = self.counter + n
def getFrameNumber(self):
"""
**SUMMARY**
Get the current frame number of the video source.
Available for only video sources.
**RETURNS**
* *int* - number of the frame
**EXAMPLES**
>>> cam = VirtualCamera("filename.avi", "video", 120)
>>> i=0
>>> while i<60:
... cam.getImage().show()
... i+=1
>>> cam.skipFrames(100)
>>> cam.getFrameNumber()
"""
if (self.sourcetype == 'video'):
number_frame = int(cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES))
return number_frame
else:
return self.counter
def getCurrentPlayTime(self):
"""
**SUMMARY**
Get the current play time in milliseconds of the video source.
Available for only video sources.
**RETURNS**
* *int* - milliseconds of time from beginning of file.
**EXAMPLES**
>>> cam = VirtualCamera("filename.avi", "video", 120)
>>> i=0
>>> while i<60:
... cam.getImage().show()
... i+=1
>>> cam.skipFrames(100)
>>> cam.getCurrentPlayTime()
"""
if (self.sourcetype == 'video'):
milliseconds = int(cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_MSEC))
return milliseconds
else:
raise ValueError('sources other than video do not have play time property')
def findLastestImage(self, directory='.', extension='png'):
"""
**SUMMARY**
This function finds the latest file in a directory
with a given extension.
**PARAMETERS**
directory - The directory you want to load images from (defaults to current directory)
extension - The image extension you want to use (defaults to .png)
**RETURNS**
The filename of the latest image
**USAGE**
>>> cam = VirtualCamera('imgs/', 'directory') #load the latest image from the 'imgs' directory
>>> cam.getImage() # Grab the latest image from that directory
"""
max_mtime = 0
max_dir = None
max_file = None
max_full_path = None
for dirname,subdirs,files in os.walk(directory):
for fname in files:
if fname.split('.')[-1] == extension:
full_path = os.path.join(dirname, fname)
mtime = os.stat(full_path).st_mtime
if mtime > max_mtime:
max_mtime = mtime
max_dir = dirname
max_file = fname
self.lastmtime = mtime
max_full_path = os.path.abspath(os.path.join(dirname, fname))
#if file is being written, block until mtime is at least 100ms old
while time.mktime(time.localtime()) - os.stat(max_full_path).st_mtime < 0.1:
time.sleep(0)
return max_full_path
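# --- Illustrative sketch (added for clarity; not part of the original library code) ---
# findLastestImage() above walks a directory tree and keeps the file with the
# largest modification time. The same idea, stripped of the camera state:
def _sketch_latest_file(directory=".", extension="png"):
    import os
    latest, latest_mtime = None, 0
    for dirname, _subdirs, files in os.walk(directory):
        for fname in files:
            if fname.split(".")[-1] == extension:
                full_path = os.path.join(dirname, fname)
                mtime = os.stat(full_path).st_mtime
                if mtime > latest_mtime:
                    latest, latest_mtime = os.path.abspath(full_path), mtime
    return latest  # None if nothing matched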
class Kinect(FrameSource):
"""
**SUMMARY**
This is an experimental wrapper for the Freenect python libraries
you can getImage() and getDepth() for separate channel images
"""
def __init__(self, device_number=0):
"""
**SUMMARY**
In the kinect contructor, device_number indicates which kinect to
connect to. It defaults to 0.
**PARAMETERS**
* *device_number* - The index of the kinect, these go from 0 upward.
"""
self.deviceNumber = device_number
if not FREENECT_ENABLED:
logger.warning("You don't seem to have the freenect library installed. This will make it hard to use a Kinect.")
#this code was borrowed from
#https://github.com/amiller/libfreenect-goodies
def getImage(self):
"""
**SUMMARY**
This method returns the Kinect camera image.
**RETURNS**
The Kinect's color camera image.
**EXAMPLE**
>>> k = Kinect()
>>> while True:
>>> k.getImage().show()
"""
video = freenect.sync_get_video(self.deviceNumber)[0]
self.capturetime = time.time()
#video = video[:, :, ::-1] # RGB -> BGR
return Image(video.transpose([1,0,2]), self)
#low bits in this depth are stripped so it fits in an 8-bit image channel
def getDepth(self):
"""
**SUMMARY**
This method returns the Kinect depth image.
**RETURNS**
The Kinect's depth camera image as a grayscale image.
**EXAMPLE**
>>> k = Kinect()
>>> while True:
>>> d = k.getDepth()
>>> img = k.getImage()
>>> result = img.sideBySide(d)
>>> result.show()
"""
depth = freenect.sync_get_depth(self.deviceNumber)[0]
self.capturetime = time.time()
np.clip(depth, 0, 2**10 - 1, depth)
depth >>= 2
depth = depth.astype(np.uint8).transpose()
return Image(depth, self)
#we're going to also support a higher-resolution (11-bit) depth matrix
#if you want to actually do computations with the depth
def getDepthMatrix(self):
self.capturetime = time.time()
return freenect.sync_get_depth(self.deviceNumber)[0]
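# --- Illustrative sketch (added for clarity; not part of the original library code) ---
# Kinect.getDepth() above packs the sensor's 11-bit depth into an 8-bit image
# channel by clipping to 10 bits and dropping the two lowest bits. The same
# transform on a synthetic numpy array:
def _sketch_depth_to_uint8(depth11):
    import numpy as np
    depth = np.clip(depth11, 0, 2 ** 10 - 1)  # limit to the 10-bit range
    depth >>= 2                               # 10 bits -> 8 bits
    return depth.astype(np.uint8)
# e.g. _sketch_depth_to_uint8(np.array([0, 512, 2047])) -> array([0, 128, 255])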
class JpegStreamReader(threading.Thread):
"""
**SUMMARY**
A Threaded class for pulling down JPEG streams and breaking up the images. This
is handy for reading the stream of images from an IP camera.
"""
url = ""
currentframe = ""
_threadcapturetime = ""
def run(self):
f = ''
if re.search('@', self.url):
authstuff = re.findall('//(\S+)@', self.url)[0]
self.url = re.sub("//\S+@", "//", self.url)
user, password = authstuff.split(":")
#thank you missing urllib2 manual
#http://www.voidspace.org.uk/python/articles/urllib2.shtml#id5
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(None, self.url, user, password)
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
opener = urllib2.build_opener(handler)
f = opener.open(self.url)
else:
f = urllib2.urlopen(self.url)
headers = f.info()
if (headers.has_key("content-type")):
headers['Content-type'] = headers['content-type'] #force ucase first char
if not headers.has_key("Content-type"):
logger.warning("Tried to load a JpegStream from " + self.url + ", but didn't find a content-type header!")
return
(multipart, boundary) = headers['Content-type'].split("boundary=")
if not re.search("multipart", multipart, re.I):
logger.warning("Tried to load a JpegStream from " + self.url + ", but the content type header was " + multipart + " not multipart/replace!")
return
buff = ''
data = f.readline().strip()
length = 0
contenttype = "jpeg"
#the first frame contains a boundarystring and some header info
while (1):
#print data
if (re.search(boundary, data.strip()) and len(buff)):
#we have a full jpeg in buffer. Convert to an image
if contenttype == "jpeg":
self.currentframe = buff
self._threadcapturetime = time.time()
buff = ''
if (re.match("Content-Type", data, re.I)):
#set the content type, if provided (default to jpeg)
(header, typestring) = data.split(":")
(junk, contenttype) = typestring.strip().split("/")
if (re.match("Content-Length", data, re.I)):
#once we have the content length, we know how far to go jfif
(header, length) = data.split(":")
length = int(length.strip())
if (re.search("JFIF", data, re.I) or re.search("\xff\xd8\xff\xdb", data) or len(data) > 55):
# we have reached the start of the image
buff = ''
if length and length > len(data):
buff += data + f.read(length - len(data)) #read the remainder of the image
if contenttype == "jpeg":
self.currentframe = buff
self._threadcapturetime = time.time()
else:
while (not re.search(boundary, data)):
buff += data
data = f.readline()
endimg, junk = data.split(boundary)
buff += endimg
data = boundary
continue
data = f.readline() #load the next (header) line
time.sleep(0) #let the other threads go
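# --- Illustrative sketch (added for clarity; not part of the original library code) ---
# JpegStreamReader.run() above carves JPEG frames out of a multipart
# (motion-JPEG style) stream by watching for the boundary string and the
# Content-Type/Content-Length headers. On a fully buffered stream the framing
# can be shown with a plain split; each part separates headers from payload
# with a blank line:
def _sketch_split_mjpeg(buffered_stream, boundary):
    frames = []
    for part in buffered_stream.split(boundary):
        head, _sep, body = part.partition("\r\n\r\n")
        if "image/jpeg" in head and body.strip():
            frames.append(body.strip())
    return frames
# This only sketches the framing; the threaded reader above works incrementally,
# line by line, so it never needs the whole stream in memory.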
class JpegStreamCamera(FrameSource):
"""
**SUMMARY**
The JpegStreamCamera takes a URL of a JPEG stream and treats it like a camera. The current frame can always be accessed with getImage()
Requires the Python Imaging Library: http://www.pythonware.com/library/pil/handbook/index.htm
**EXAMPLE**
Using your Android Phone as a Camera. Softwares like IP Webcam can be used.
>>> cam = JpegStreamCamera("http://192.168.65.101:8080/videofeed") # your IP may be different.
>>> img = cam.getImage()
>>> img.show()
"""
url = ""
camthread = ""
def __init__(self, url):
if not PIL_ENABLED:
logger.warning("You need the Python Image Library (PIL) to use the JpegStreamCamera")
return
if not url.startswith('http://'):
url = "http://" + url
self.url = url
self.camthread = JpegStreamReader()
self.camthread.url = self.url
self.camthread.daemon = True
self.camthread.start()
def getImage(self):
"""
**SUMMARY**
Return the current frame of the JpegStream being monitored
"""
if not self.camthread._threadcapturetime:
now = time.time()
while not self.camthread._threadcapturetime:
if time.time() - now > 5:
warnings.warn("Timeout fetching JpegStream at " + self.url)
return
time.sleep(0.1)
self.capturetime = self.camthread._threadcapturetime
return Image(pil.open(StringIO(self.camthread.currentframe)), self)
_SANE_INIT = False
class Scanner(FrameSource):
"""
**SUMMARY**
The Scanner lets you use any supported SANE-compatible scanner as a SimpleCV camera.
List of supported devices: http://www.sane-project.org/sane-supported-devices.html
Requires the PySANE wrapper for libsane. The sane scanner object
is available for direct manipulation at Scanner.device
This scanner object is heavily modified from
https://bitbucket.org/DavidVilla/pysane
Constructor takes an index (default 0) and a list of SANE options
(default is color mode).
**EXAMPLE**
>>> scan = Scanner(0, { "mode": "gray" })
>>> preview = scan.getPreview()
>>> stuff = preview.findBlobs(minsize = 1000)
>>> topleft = (np.min(stuff.x()), np.min(stuff.y()))
>>> bottomright = (np.max(stuff.x()), np.max(stuff.y()))
>>> scan.setROI(topleft, bottomright)
>>> scan.setProperty("resolution", 1200) #set high resolution
>>> scan.setProperty("mode", "color")
>>> img = scan.getImage()
>>> scan.setROI() #reset region of interest
>>> img.show()
"""
usbid = None
manufacturer = None
model = None
kind = None
device = None
max_x = None
max_y = None
def __init__(self, id = 0, properties = { "mode": "color"}):
global _SANE_INIT
import sane
if not _SANE_INIT:
try:
sane.init()
_SANE_INIT = True
except:
warn("Initializing pysane failed, do you have pysane installed?")
return
devices = sane.get_devices()
if not len(devices):
warn("Did not find a sane-compatable device")
return
self.usbid, self.manufacturer, self.model, self.kind = devices[id]
self.device = sane.open(self.usbid)
self.max_x = self.device.br_x
self.max_y = self.device.br_y #save our extents for later
for k, v in properties.items():
setattr(self.device, k, v)
def getImage(self):
"""
**SUMMARY**
Retrieve an Image-object from the scanner. Any ROI set with
setROI() is taken into account.
**RETURNS**
A SimpleCV Image. Note that whatever the scanner mode is,
SimpleCV will return a 3-channel, 8-bit image.
**EXAMPLES**
>>> scan = Scanner()
>>> scan.getImage().show()
"""
return Image(self.device.scan())
def getPreview(self):
"""
**SUMMARY**
Retrieve a preview-quality Image-object from the scanner.
**RETURNS**
A SimpleCV Image. Note that whatever the scanner mode is,
SimpleCV will return a 3-channel, 8-bit image.
**EXAMPLES**
>>> scan = Scanner()
>>> scan.getPreview().show()
"""
self.preview = True
img = Image(self.device.scan())
self.preview = False
return img
def getAllProperties(self):
"""
**SUMMARY**
Return a list of all properties and values from the scanner
**RETURNS**
Dictionary of active options and values. Inactive options appear
as "None"
**EXAMPLES**
>>> scan = Scanner()
>>> print scan.getAllProperties()
"""
props = {}
for prop in self.device.optlist:
val = None
if hasattr(self.device, prop):
val = getattr(self.device, prop)
props[prop] = val
return props
def printProperties(self):
"""
**SUMMARY**
Print detailed information about the SANE device properties
**RETURNS**
Nothing
**EXAMPLES**
>>> scan = Scanner()
>>> scan.printProperties()
"""
for prop in self.device.optlist:
try:
print self.device[prop]
except:
pass
def getProperty(self, prop):
"""
**SUMMARY**
Returns a single property value from the SANE device
equivalent to Scanner.device.PROPERTY
**RETURNS**
Value for option or None if missing/inactive
**EXAMPLES**
>>> scan = Scanner()
>>> print scan.getProperty('mode')
color
"""
if hasattr(self.device, prop):
return getattr(self.device, prop)
return None
def setROI(self, topleft = (0,0), bottomright = (-1,-1)):
"""
**SUMMARY**
Sets an ROI for the scanner in the current resolution. The
two parameters, topleft and bottomright, will default to the
device extents, so the ROI can be reset by calling setROI with
no parameters.
The ROI is set by SANE in resolution independent units (default
MM) so resolution can be changed after ROI has been set.
**RETURNS**
None
**EXAMPLES**
>>> scan = Scanner()
>>> scan.setROI((50, 50), (100,100))
>>> scan.getImage().show() # a very small crop on the scanner
"""
self.device.tl_x = self.px2mm(topleft[0])
self.device.tl_y = self.px2mm(topleft[1])
if bottomright[0] == -1:
self.device.br_x = self.max_x
else:
self.device.br_x = self.px2mm(bottomright[0])
if bottomright[1] == -1:
self.device.br_y = self.max_y
else:
self.device.br_y = self.px2mm(bottomright[1])
def setProperty(self, prop, val):
"""
**SUMMARY**
Assigns a property value from the SANE device
equivalent to Scanner.device.PROPERTY = VALUE
**RETURNS**
None
**EXAMPLES**
>>> scan = Scanner()
>>> print scan.getProperty('mode')
color
>>> scan.setProperty("mode") = "gray"
"""
setattr(self.device, prop, val)
def px2mm(self, pixels = 1):
"""
**SUMMARY**
Helper function to convert native scanner resolution to millimeter units
**RETURNS**
Float value
**EXAMPLES**
>>> scan = Scanner()
>>> scan.px2mm(scan.device.resolution) #return DPI in DPMM
"""
return float(pixels * 25.4 / float(self.device.resolution))
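# --- Illustrative sketch (added for clarity; not part of the original library code) ---
# Scanner.px2mm() above converts a pixel count at the scanner's current DPI into
# millimeters: pixels * 25.4 / resolution (25.4 mm per inch). Standalone:
def _sketch_px_to_mm(pixels, dpi):
    return pixels * 25.4 / float(dpi)
# e.g. _sketch_px_to_mm(300, 300) == 25.4 (300 px at 300 DPI is one inch)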
class DigitalCamera(FrameSource):
"""
**SUMMARY**
The DigitalCamera takes a point-and-shoot camera or high-end SLR and uses it as a Camera. The current frame can always be accessed with getPreview()
Requires the PiggyPhoto Library: https://github.com/alexdu/piggyphoto
**EXAMPLE**
>>> cam = DigitalCamera()
>>> pre = cam.getPreview()
>>> pre.findBlobs().show()
>>>
>>> img = cam.getImage()
>>> img.show()
"""
camera = None
usbid = None
device = None
def __init__(self, id = 0):
try:
import piggyphoto
except:
warn("Initializing piggyphoto failed, do you have piggyphoto installed?")
return
devices = piggyphoto.cameraList(autodetect=True).toList()
if not len(devices):
warn("No compatible digital cameras attached")
return
self.device, self.usbid = devices[id]
self.camera = piggyphoto.camera()
def getImage(self):
"""
**SUMMARY**
Retrieve an Image-object from the camera with the highest quality possible.
**RETURNS**
A SimpleCV Image.
**EXAMPLES**
>>> cam = DigitalCamera()
>>> cam.getImage().show()
"""
fd, path = tempfile.mkstemp()
self.camera.capture_image(path)
img = Image(path)
os.close(fd)
os.remove(path)
return img
def getPreview(self):
"""
**SUMMARY**
Retrieve an Image-object from the camera with the preview quality from the camera.
**RETURNS**
A SimpleCV Image.
**EXAMPLES**
>>> cam = DigitalCamera()
>>> cam.getPreview().show()
"""
fd, path = tempfile.mkstemp()
self.camera.capture_preview(path)
img = Image(path)
os.close(fd)
os.remove(path)
return img
class ScreenCamera():
"""
**SUMMARY**
ScreenCamera is a camera class that allows you to capture all or part of the screen and return it as a color image.
Requires the pyscreenshot Library: https://github.com/vijaym123/pyscreenshot
**EXAMPLE**
>>> sc = ScreenCamera()
>>> res = sc.getResolution()
>>> print res
>>>
>>> img = sc.getImage()
>>> img.show()
"""
_roi = None
def __init__(self):
if not PYSCREENSHOT_ENABLED:
warn("Initializing pyscreenshot failed. Install pyscreenshot from https://github.com/vijaym123/pyscreenshot")
return None
def getResolution(self):
"""
**DESCRIPTION**
returns the resolution of the screenshot of the screen.
**PARAMETERS**
None
**RETURNS**
returns the resolution.
**EXAMPLE**
>>> img = ScreenCamera()
>>> res = img.getResolution()
>>> print res
"""
return Image(pyscreenshot.grab()).size()
def setROI(self,roi):
"""
**DESCRIPTION**
To set the region of interest.
**PARAMETERS**
* *roi* - tuple - a tuple of size 4, (x, y, w, h), where (x, y) is taken as the center of the region of interest on the screen.
**RETURNS**
None
**EXAMPLE**
>>> sc = ScreenCamera()
>>> res = sc.getResolution()
>>> sc.setROI((res[0]/4, res[1]/4, res[0]/2, res[1]/2))
>>> img = sc.getImage()
>>> img.show()
"""
if isinstance(roi,tuple) and len(roi)==4:
self._roi = roi
return
def getImage(self):
"""
**DESCRIPTION**
getImage function returns an Image object capturing the current screenshot of the screen.
**PARAMETERS**
None
**RETURNS**
Returns the region of interest if setROI is used.
else returns the original capture of the screenshot.
**EXAMPLE**
>>> sc = ScreenCamera()
>>> img = sc.getImage()
>>> img.show()
"""
img = Image(pyscreenshot.grab())
try :
if self._roi :
img = img.crop(self._roi,centered=True)
except :
print "Error croping the image. ROI specified is not correct."
return None
return img
class StereoImage:
"""
**SUMMARY**
This class is for binocular stereopsis, that is, extracting 3D information from two differing views of a scene (Image). By comparing the two images, relative depth information can be obtained.
- Fundamental Matrix : F : a 3 x 3 numpy matrix, is a relationship between any two images of the same scene that constrains where the projection of points from the scene can occur in both images. see : http://en.wikipedia.org/wiki/Fundamental_matrix_(computer_vision)
- Homography Matrix : H : a 3 x 3 numpy matrix,
- ptsLeft : The matched points on the left image.
- ptsRight : The matched points on the right image.
- findDisparityMap and findDepthMap - provide 3D information.
For more information on stereo vision, visit : http://en.wikipedia.org/wiki/Computer_stereo_vision
**EXAMPLE**
>>> img1 = Image('sampleimages/stereo_view1.png')
>>> img2 = Image('sampleimages/stereo_view2.png')
>>> stereoImg = StereoImage(img1,img2)
>>> stereoImg.findDisparityMap(method="BM",nDisparity=20).show()
"""
def __init__( self, imgLeft , imgRight ):
self.ImageLeft = imgLeft
self.ImageRight = imgRight
if self.ImageLeft.size() != self.ImageRight.size():
logger.warning('Left and Right images should have the same size.')
return None
else:
self.size = self.ImageLeft.size()
def findFundamentalMat(self, thresh=500.00, minDist=0.15 ):
"""
**SUMMARY**
This method returns the fundamental matrix F such that (P_2).T F P_1 = 0
**PARAMETERS**
* *thresh* - The feature quality metric. This can be any value between about 300 and 500. Higher
values should return fewer, but higher quality features.
* *minDist* - The value below which the feature correspondence is considered a match. This
is the distance between two feature vectors. Good values are between 0.05 and 0.3
**RETURNS**
Return None if it fails.
* *F* - Fundamental matrix as ndarray.
* *matched_pts1* - the matched points (x, y) in img1
* *matched_pts2* - the matched points (x, y) in img2
**EXAMPLE**
>>> img1 = Image("sampleimages/stereo_view1.png")
>>> img2 = Image("sampleimages/stereo_view2.png")
>>> stereoImg = StereoImage(img1,img2)
>>> F,pts1,pts2 = stereoImg.findFundamentalMat()
**NOTE**
If you deal with the fundamental matrix F directly, be aware of (P_2).T F P_1 = 0
where P_2 and P_1 consist of (y, x, 1)
"""
(kpts1, desc1) = self.ImageLeft._getRawKeypoints(thresh)
(kpts2, desc2) = self.ImageRight._getRawKeypoints(thresh)
if desc1 is None or desc2 is None:
logger.warning("We didn't get any descriptors. Image might be too uniform or blurry.")
return None
num_pts1 = desc1.shape[0]
num_pts2 = desc2.shape[0]
magic_ratio = 1.00
if num_pts1 > num_pts2:
magic_ratio = float(num_pts1) / float(num_pts2)
(idx, dist) = Image()._getFLANNMatches(desc1, desc2)
p = dist.squeeze()
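# keep only matches whose FLANN distance (scaled by the keypoint-count ratio) is below minDist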
result = p * magic_ratio < minDist
try:
import cv2
except:
logger.warning("Can't use fundamental matrix without OpenCV >= 2.3.0")
return None
pts1 = np.array([kpt.pt for kpt in kpts1])
pts2 = np.array([kpt.pt for kpt in kpts2])
matched_pts1 = pts1[idx[result]].squeeze()
matched_pts2 = pts2[result]
(F, mask) = cv2.findFundamentalMat(matched_pts1, matched_pts2, method=cv.CV_FM_LMEDS)
inlier_ind = mask.nonzero()[0]
matched_pts1 = matched_pts1[inlier_ind, :]
matched_pts2 = matched_pts2[inlier_ind, :]
matched_pts1 = matched_pts1[:, ::-1]
matched_pts2 = matched_pts2[:, ::-1]
return (F, matched_pts1, matched_pts2)
def findHomography( self, thresh=500.00, minDist=0.15):
"""
**SUMMARY**
This method returns the homography H such that P2 ~ H P1
**PARAMETERS**
* *thresh* - The feature quality metric. This can be any value between about 300 and 500. Higher
values should return fewer, but higher quality features.
* *minDist* - The value below which the feature correspondence is considered a match. This
is the distance between two feature vectors. Good values are between 0.05 and 0.3
**RETURNS**
Return None if it fails.
* *H* - homography as ndarray.
* *matched_pts1* - the matched points (x, y) in img1
* *matched_pts2* - the matched points (x, y) in img2
**EXAMPLE**
>>> img1 = Image("sampleimages/stereo_view1.png")
>>> img2 = Image("sampleimages/stereo_view2.png")
>>> stereoImg = StereoImage(img1,img2)
>>> H,pts1,pts2 = stereoImg.findHomography()
**NOTE**
If you deal with the homography H directly, be aware of P2 ~ H P1
where P2 and P1 consist of (y, x, 1)
"""
(kpts1, desc1) = self.ImageLeft._getRawKeypoints(thresh)
(kpts2, desc2) = self.ImageRight._getRawKeypoints(thresh)
if desc1 is None or desc2 is None:
logger.warning("We didn't get any descriptors. Image might be too uniform or blurry.")
return None
num_pts1 = desc1.shape[0]
num_pts2 = desc2.shape[0]
magic_ratio = 1.00
if num_pts1 > num_pts2:
magic_ratio = float(num_pts1) / float(num_pts2)
(idx, dist) = Image()._getFLANNMatches(desc1, desc2)
p = dist.squeeze()
result = p * magic_ratio < minDist
try:
import cv2
except:
logger.warning("Can't use homography without OpenCV >= 2.3.0")
return None
pts1 = np.array([kpt.pt for kpt in kpts1])
pts2 = np.array([kpt.pt for kpt in kpts2])
matched_pts1 = pts1[idx[result]].squeeze()
matched_pts2 = pts2[result]
(H, mask) = cv2.findHomography(matched_pts1, matched_pts2,
method=cv.CV_LMEDS)
inlier_ind = mask.nonzero()[0]
matched_pts1 = matched_pts1[inlier_ind, :]
matched_pts2 = matched_pts2[inlier_ind, :]
matched_pts1 = matched_pts1[:, ::-1]
matched_pts2 = matched_pts2[:, ::-1]
return (H, matched_pts1, matched_pts2)
def findDisparityMap( self, nDisparity=16 ,method='BM'):
"""
**SUMMARY**
This method generates a disparity map from a pair of stereo images.
**PARAMETERS**
* *method* :
*BM* - Block Matching algorithm, this is a real time algorithm.
*SGBM* - Semi Global Block Matching algorithm, this is not a real time algorithm.
*GC* - Graph Cut algorithm, This is not a real time algorithm.
* *nDisparity* - Maximum disparity value. This should be multiple of 16
* *scale* - Scale factor
**RETURNS**
Return None if it fails.
Returns Disparity Map Image
**EXAMPLE**
>>> img1 = Image("sampleimages/stereo_view1.png")
>>> img2 = Image("sampleimages/stereo_view2.png")
>>> stereoImg = StereoImage(img1,img2)
>>> disp = stereoImg.findDisparityMap(method="BM")
"""
gray_left = self.ImageLeft.getGrayscaleMatrix()
gray_right = self.ImageRight.getGrayscaleMatrix()
(r, c) = self.size
scale = int(self.ImageLeft.depth)
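# the OpenCV block-matching routines require the disparity search range to be a positive multiple of 16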
if nDisparity % 16 !=0 :
if nDisparity < 16 :
nDisparity = 16
nDisparity = (nDisparity/16)*16
try :
if method == 'BM':
disparity = cv.CreateMat(c, r, cv.CV_32F)
state = cv.CreateStereoBMState()
state.SADWindowSize = 41
state.preFilterType = 1
state.preFilterSize = 41
state.preFilterCap = 31
state.minDisparity = -8
state.numberOfDisparities = nDisparity
state.textureThreshold = 10
#state.speckleRange = 32
#state.speckleWindowSize = 100
state.uniquenessRatio=15
cv.FindStereoCorrespondenceBM(gray_left, gray_right, disparity, state)
disparity_visual = cv.CreateMat(c, r, cv.CV_8U)
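# rescale the raw disparities into an 8-bit range so they can be viewed as a grayscale image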
cv.Normalize( disparity, disparity_visual, 0, 256, cv.CV_MINMAX )
disparity_visual = Image(disparity_visual)
return Image(disparity_visual.getBitmap(),colorSpace=ColorSpace.GRAY)
elif method == 'GC':
disparity_left = cv.CreateMat(c, r, cv.CV_32F)
disparity_right = cv.CreateMat(c, r, cv.CV_32F)
state = cv.CreateStereoGCState(nDisparity, 8)
state.minDisparity = -8
cv.FindStereoCorrespondenceGC( gray_left, gray_right, disparity_left, disparity_right, state, 0)
disparity_left_visual = cv.CreateMat(c, r, cv.CV_8U)
cv.Normalize( disparity_left, disparity_left_visual, 0, 256, cv.CV_MINMAX )
#cv.Scale(disparity_left, disparity_left_visual, -scale)
disparity_left_visual = Image(disparity_left_visual)
return Image(disparity_left_visual.getBitmap(),colorSpace=ColorSpace.GRAY)
elif method == 'SGBM':
try:
import cv2
ver = cv2.__version__
if ver.startswith("$Rev :"):
logger.warning("Can't use SGBM without OpenCV >= 2.4.0")
return None
except:
logger.warning("Can't use SGBM without OpenCV >= 2.4.0")
return None
state = cv2.StereoSGBM()
state.SADWindowSize = 41
state.preFilterCap = 31
state.minDisparity = 0
state.numberOfDisparities = nDisparity
#state.speckleRange = 32
#state.speckleWindowSize = 100
state.disp12MaxDiff = 1
state.fullDP=False
state.P1 = 8 * 1 * 41 * 41
state.P2 = 32 * 1 * 41 * 41
state.uniquenessRatio=15
disparity=state.compute(self.ImageLeft.getGrayNumpy(),self.ImageRight.getGrayNumpy())
return Image(disparity)
else :
logger.warning("Unknown method. Choose one method amoung BM or SGBM or GC !")
return None
except :
logger.warning("Error in computing the Disparity Map, may be due to the Images are stereo in nature.")
return None
def Eline (self, point, F, whichImage):
"""
**SUMMARY**
This method returns, line feature object.
**PARAMETERS**
* *point* - Input point (x, y)
* *F* - Fundamental matrix.
* *whichImage* - Index of the image (1 or 2) that contains the point
**RETURNS**
epipolar line, in the form of line feature object.
**EXAMPLE**
>>> img1 = Image("sampleimages/stereo_view1.png")
>>> img2 = Image("sampleimages/stereo_view2.png")
>>> stereoImg = StereoImage(img1,img2)
>>> F,pts1,pts2 = stereoImg.findFundamentalMat()
>>> point = pts2[0]
>>> epiline = stereoImg.Eline(point, F, 1) #find corresponding Epipolar line in the left image.
"""
from SimpleCV.Features.Detection import Line
pts1 = (0,0)
pts2 = self.size
pt_cvmat = cv.CreateMat(1, 1, cv.CV_32FC2)
pt_cvmat[0, 0] = (point[1], point[0]) # OpenCV seems to use (y, x) coordinate.
line = cv.CreateMat(1, 1, cv.CV_32FC3)
cv.ComputeCorrespondEpilines(pt_cvmat, whichImage, npArray2cvMat(F), line)
line_npArray = np.array(line).squeeze()
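# swap the a and b line coefficients back, since the input point was given in (y, x) order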
line_npArray = line_npArray[[1, 0, 2]]
pts1 = (pts1[0],(-line_npArray[2]-line_npArray[0]*pts1[0])/line_npArray[1] )
pts2 = (pts2[0],(-line_npArray[2]-line_npArray[0]*pts2[0])/line_npArray[1] )
if whichImage == 1 :
return Line(self.ImageLeft, [pts1,pts2])
elif whichImage == 2 :
return Line(self.ImageRight, [pts1,pts2])
def projectPoint( self, point, H ,whichImage):
"""
**SUMMARY**
This method returns the corresponding point (x, y)
**PARAMETERS**
* *point* - Input point (x, y)
* *whichImage* - Index of the image (1 or 2) that contains the point
* *H* - Homography that can be estimated
using StereoCamera.findHomography()
**RETURNS**
Corresponding point (x, y) as tuple
**EXAMPLE**
>>> img1 = Image("sampleimages/stereo_view1.png")
>>> img2 = Image("sampleimages/stereo_view2.png")
>>> stereoImg = StereoImage(img1,img2)
>>> H,pts1,pts2 = stereoImg.findHomography()
>>> point = pts2[0]
>>> projectPoint = stereoImg.projectPoint(point,H ,1) #finds corresponding point in the left image.
"""
H = np.matrix(H)
point = np.matrix((point[1], point[0],1.00))
if whichImage == 1:
corres_pt = H * point.T
else:
corres_pt = np.linalg.inv(H) * point.T
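# convert back from homogeneous coordinates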
corres_pt = corres_pt / corres_pt[2]
return (float(corres_pt[1]), float(corres_pt[0]))
def get3DImage(self, Q, method="BM", state=None):
"""
**SUMMARY**
This method returns the 3D depth image using reprojectImageTo3D method.
**PARAMETERS**
* *Q* - reprojection Matrix (disparity to depth matrix)
* *method* - Stereo Correspondonce method to be used.
- "BM" - Stereo BM
- "SGBM" - Stereo SGBM
* *state* - dictionary corresponding to parameters of
stereo correspondonce.
SADWindowSize - odd int
nDisparity - int
minDisparity - int
preFilterCap - int
preFilterType - int (only BM)
speckleRange - int
speckleWindowSize - int
P1 - int (only SGBM)
P2 - int (only SGBM)
fullDP - Bool (only SGBM)
uniquenessRatio - int
textureThreshold - int (only BM)
**RETURNS**
SimpleCV.Image representing 3D depth Image
also StereoImage.Image3D gives OpenCV 3D Depth Image of CV_32F type.
**EXAMPLE**
>>> lImage = Image("l.jpg")
>>> rImage = Image("r.jpg")
>>> stereo = StereoImage(lImage, rImage)
>>> Q = cv.Load("Q.yml")
>>> stereo.get3DImage(Q).show()
>>> state = {"SADWindowSize":9, "nDisparity":112, "minDisparity":-39}
>>> stereo.get3DImage(Q, "BM", state).show()
>>> stereo.get3DImage(Q, "SGBM", state).show()
"""
imgLeft = self.ImageLeft
imgRight = self.ImageRight
cv2flag = True
try:
import cv2
except ImportError:
cv2flag = False
import cv2.cv as cv
(r, c) = self.size
if method == "BM":
sbm = cv.CreateStereoBMState()
disparity = cv.CreateMat(c, r, cv.CV_32F)
if state:
SADWindowSize = state.get("SADWindowSize")
preFilterCap = state.get("preFilterCap")
minDisparity = state.get("minDisparity")
numberOfDisparities = state.get("nDisparity")
uniquenessRatio = state.get("uniquenessRatio")
speckleRange = state.get("speckleRange")
speckleWindowSize = state.get("speckleWindowSize")
textureThreshold = state.get("textureThreshold")
speckleRange = state.get("speckleRange")
speckleWindowSize = state.get("speckleWindowSize")
preFilterType = state.get("perFilterType")
if SADWindowSize is not None:
sbm.SADWindowSize = SADWindowSize
if preFilterCap is not None:
sbm.preFilterCap = preFilterCap
if minDisparity is not None:
sbm.minDisparity = minDisparity
if numberOfDisparities is not None:
sbm.numberOfDisparities = numberOfDisparities
if uniquenessRatio is not None:
sbm.uniquenessRatio = uniquenessRatio
if speckleRange is not None:
sbm.speckleRange = speckleRange
if speckleWindowSize is not None:
sbm.speckleWindowSize = speckleWindowSize
if textureThreshold is not None:
sbm.textureThreshold = textureThreshold
if preFilterType is not None:
sbm.preFilterType = preFilterType
else:
sbm.SADWindowSize = 9
sbm.preFilterType = 1
sbm.preFilterSize = 5
sbm.preFilterCap = 61
sbm.minDisparity = -39
sbm.numberOfDisparities = 112
sbm.textureThreshold = 507
sbm.uniquenessRatio= 0
sbm.speckleRange = 8
sbm.speckleWindowSize = 0
gray_left = imgLeft.getGrayscaleMatrix()
gray_right = imgRight.getGrayscaleMatrix()
cv.FindStereoCorrespondenceBM(gray_left, gray_right, disparity, sbm)
disparity_visual = cv.CreateMat(c, r, cv.CV_8U)
elif method == "SGBM":
if not cv2flag:
warnings.warn("Can't Use SGBM without OpenCV >= 2.4. Use SBM instead.")
sbm = cv2.StereoSGBM()
if state:
SADWindowSize = state.get("SADWindowSize")
preFilterCap = state.get("preFilterCap")
minDisparity = state.get("minDisparity")
numberOfDisparities = state.get("nDisparity")
P1 = state.get("P1")
P2 = state.get("P2")
uniquenessRatio = state.get("uniquenessRatio")
speckleRange = state.get("speckleRange")
speckleWindowSize = state.get("speckleWindowSize")
fullDP = state.get("fullDP")
if SADWindowSize is not None:
sbm.SADWindowSize = SADWindowSize
if preFilterCap is not None:
sbm.preFilterCap = preFilterCap
if minDisparity is not None:
sbm.minDisparity = minDisparity
if numberOfDisparities is not None:
sbm.numberOfDisparities = numberOfDisparities
if P1 is not None:
sbm.P1 = P1
if P2 is not None:
sbm.P2 = P2
if uniquenessRatio is not None:
sbm.uniquenessRatio = uniquenessRatio
if speckleRange is not None:
sbm.speckleRange = speckleRange
if speckleWindowSize is not None:
sbm.speckleWindowSize = speckleWindowSize
if fullDP is not None:
sbm.fullDP = fullDP
else:
sbm.SADWindowSize = 9
sbm.numberOfDisparities = 96
sbm.preFilterCap = 63
sbm.minDisparity = -21
sbm.uniquenessRatio = 7
sbm.speckleWindowSize = 0
sbm.speckleRange = 8
sbm.disp12MaxDiff = 1
sbm.fullDP = False
disparity = sbm.compute(imgLeft.getGrayNumpyCv2(), imgRight.getGrayNumpyCv2())
else:
warnings.warn("Unknown method. Returning None")
return None
if cv2flag:
if not isinstance(Q, np.ndarray):
Q = np.array(Q)
if not isinstance(disparity, np.ndarray):
disparity = np.array(disparity)
Image3D = cv2.reprojectImageTo3D(disparity, Q, ddepth=cv2.cv.CV_32F)
Image3D_normalize = cv2.normalize(Image3D, alpha=0, beta=255, norm_type=cv2.cv.CV_MINMAX, dtype=cv2.cv.CV_8UC3)
retVal = Image(Image3D_normalize, cv2image=True)
else:
Image3D = cv.CreateMat(self.ImageLeft.size()[1], self.ImageLeft.size()[0], cv.CV_32FC3)
Image3D_normalize = cv.CreateMat(self.ImageLeft.size()[1], self.ImageLeft.size()[0], cv.CV_8UC3)
cv.ReprojectImageTo3D(disparity, Image3D, Q)
cv.Normalize(Image3D, Image3D_normalize, 0, 255, cv.CV_MINMAX)
retVal = Image(Image3D_normalize)
self.Image3D = Image3D
return retVal
def get3DImageFromDisparity(self, disparity, Q):
"""
**SUMMARY**
This method returns the 3D depth image using reprojectImageTo3D method.
**PARAMETERS**
* *disparity* - Disparity Image
* *Q* - reprojection Matrix (disparity to depth matrix)
**RETURNS**
SimpleCV.Image representing 3D depth Image
also StereoImage.Image3D gives OpenCV 3D Depth Image of CV_32F type.
**EXAMPLE**
>>> lImage = Image("l.jpg")
>>> rImage = Image("r.jpg")
>>> stereo = StereoImage(lImage, rImage)
>>> Q = cv.Load("Q.yml")
>>> disp = stereo.findDisparityMap()
>>> stereo.get3DImageFromDisparity(disp, Q)
"""
cv2flag = True
try:
import cv2
except ImportError:
cv2flag = False
import cv2.cv as cv
if cv2flag:
if not isinstance(Q, np.ndarray):
Q = np.array(Q)
disparity = disparity.getNumpyCv2()
Image3D = cv2.reprojectImageTo3D(disparity, Q, ddepth=cv2.cv.CV_32F)
Image3D_normalize = cv2.normalize(Image3D, alpha=0, beta=255, norm_type=cv2.cv.CV_MINMAX, dtype=cv2.cv.CV_8UC3)
retVal = Image(Image3D_normalize, cv2image=True)
else:
disparity = disparity.getMatrix()
Image3D = cv.CreateMat(self.ImageLeft.size()[1], self.ImageLeft.size()[0], cv.CV_32FC3)
Image3D_normalize = cv.CreateMat(self.ImageLeft.size()[1], self.ImageLeft.size()[0], cv.CV_8UC3)
cv.ReprojectImageTo3D(disparity, Image3D, Q)
cv.Normalize(Image3D, Image3D_normalize, 0, 255, cv.CV_MINMAX)
retVal = Image(Image3D_normalize)
self.Image3D = Image3D
return retVal
class StereoCamera :
"""
StereoCamera is a class dedicated to calibrating a stereo camera. It also has functionality for
rectification and getting undistorted images.
This class can be used to calculate various parameters related to both cameras:
-> Camera Matrix
-> Distortion coefficients
-> Rotation and Translation matrix
-> Rectification transform (rotation matrix)
-> Projection matrix in the new (rectified) coordinate systems
-> Disparity-to-depth mapping matrix (Q)
"""
def __init__(self):
return
def stereoCalibration(self,camLeft, camRight, nboards=30, chessboard=(8, 5), gridsize=0.027, WinSize = (352,288)):
"""
**SUMMARY**
Stereo Calibration is a way in which you obtain the parameters that will allow you to calculate 3D information of the scene.
Once both cameras are initialized:
Press [Space] once the chessboard is identified in both cameras.
Press [esc] key to exit the calibration process.
**PARAMETERS**
* camLeft - Left camera index.
* camRight - Right camera index.
* nboards - Number of samples or multiple views of the chessboard in different positions and orientations with your stereo camera.
* chessboard - A tuple of Cols, Rows in the chessboard (used for calibration).
* gridsize - chessboard grid size in real units
* WinSize - This is the window resolution.
**RETURNS**
A tuple of the form (CM1, CM2, D1, D2, R, T, E, F) on success
CM1 - Camera Matrix for left camera,
CM2 - Camera Matrix for right camera,
D1 - Vector of distortion coefficients for left camera,
D2 - Vector of distortion coefficients for right camera,
R - Rotation matrix between the left and the right camera coordinate systems,
T - Translation vector between the left and the right coordinate systems of the cameras,
E - Essential matrix,
F - Fundamental matrix
**EXAMPLE**
>>> StereoCam = StereoCamera()
>>> calibration = StereoCam.stereoCalibration(1, 2, nboards=40)
**Note**
Press space to capture the images.
"""
count = 0
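# titles for the left and right preview windows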
n1="Left"
n2="Right"
try :
captureLeft = cv.CaptureFromCAM(camLeft)
cv.SetCaptureProperty(captureLeft, cv.CV_CAP_PROP_FRAME_WIDTH, WinSize[0])
cv.SetCaptureProperty(captureLeft, cv.CV_CAP_PROP_FRAME_HEIGHT, WinSize[1])
frameLeft = cv.QueryFrame(captureLeft)
cv.FindChessboardCorners(frameLeft, (chessboard))
captureRight = cv.CaptureFromCAM(camRight)
cv.SetCaptureProperty(captureRight, cv.CV_CAP_PROP_FRAME_WIDTH, WinSize[0])
cv.SetCaptureProperty(captureRight, cv.CV_CAP_PROP_FRAME_HEIGHT, WinSize[1])
frameRight = cv.QueryFrame(captureRight)
cv.FindChessboardCorners(frameRight, (chessboard))
except :
print "Error Initialising the Left and Right camera"
return None
imagePoints1 = cv.CreateMat(1, nboards * chessboard[0] * chessboard[1], cv.CV_64FC2)
imagePoints2 = cv.CreateMat(1, nboards * chessboard[0] * chessboard[1], cv.CV_64FC2)
objectPoints = cv.CreateMat(1, chessboard[0] * chessboard[1] * nboards, cv.CV_64FC3)
nPoints = cv.CreateMat(1, nboards, cv.CV_32S)
# the intrinsic camera matrices
CM1 = cv.CreateMat(3, 3, cv.CV_64F)
CM2 = cv.CreateMat(3, 3, cv.CV_64F)
# the distortion coefficients of both cameras
D1 = cv.CreateMat(1, 5, cv.CV_64F)
D2 = cv.CreateMat(1, 5, cv.CV_64F)
# matrices governing the rotation and translation from camera 1 to camera 2
R = cv.CreateMat(3, 3, cv.CV_64F)
T = cv.CreateMat(3, 1, cv.CV_64F)
# the essential and fundamental matrices
E = cv.CreateMat(3, 3, cv.CV_64F)
F = cv.CreateMat(3, 3, cv.CV_64F)
while True:
frameLeft = cv.QueryFrame(captureLeft)
cv.Flip(frameLeft, frameLeft, 1)
frameRight = cv.QueryFrame(captureRight)
cv.Flip(frameRight, frameRight, 1)
k = cv.WaitKey(3)
cor1 = cv.FindChessboardCorners(frameLeft, (chessboard))
if cor1[0] :
cv.DrawChessboardCorners(frameLeft, (chessboard), cor1[1], cor1[0])
cv.ShowImage(n1, frameLeft)
cor2 = cv.FindChessboardCorners(frameRight, (chessboard))
if cor2[0]:
cv.DrawChessboardCorners(frameRight, (chessboard), cor2[1], cor2[0])
cv.ShowImage(n2, frameRight)
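# both chessboards found and [Space] pressed: record this view's corner points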
if cor1[0] and cor2[0] and k==0x20:
print count
for i in range(0, len(cor1[1])):
cv.Set1D(imagePoints1, count * chessboard[0] * chessboard[1] + i, cv.Scalar(cor1[1][i][0], cor1[1][i][1]))
cv.Set1D(imagePoints2, count * chessboard[0] * chessboard[1] + i, cv.Scalar(cor2[1][i][0], cor2[1][i][1]))
count += 1
if count == nboards:
cv.DestroyAllWindows()
for i in range(nboards):
for j in range(chessboard[1]):
for k in range(chessboard[0]):
cv.Set1D(objectPoints, i * chessboard[1] * chessboard[0] + j * chessboard[0] + k, (k * gridsize, j * gridsize, 0))
for i in range(nboards):
cv.Set1D(nPoints, i, chessboard[0] * chessboard[1])
cv.SetIdentity(CM1)
cv.SetIdentity(CM2)
cv.Zero(D1)
cv.Zero(D2)
print "Running stereo calibration..."
del(camLeft)
del(camRight)
cv.StereoCalibrate(objectPoints, imagePoints1, imagePoints2, nPoints, CM1, D1, CM2, D2, WinSize, R, T, E, F,
flags=cv.CV_CALIB_SAME_FOCAL_LENGTH | cv.CV_CALIB_ZERO_TANGENT_DIST)
print "Done."
return (CM1, CM2, D1, D2, R, T, E, F)
cv.ShowImage(n1, frameLeft)
cv.ShowImage(n2, frameRight)
if k == 0x1b:
print "ESC pressed. Exiting. WARNING: NOT ENOUGH CHESSBOARDS FOUND YET"
cv.DestroyAllWindows()
break
def saveCalibration(self,calibration=None, fname="Stereo",cdir="."):
"""
**SUMMARY**
saveCalibration is a method to save the StereoCalibration parameters such as CM1, CM2, D1, D2, R, T, E, F of stereo pair.
This method returns True on success and saves the calibration in the following format.
StereoCM1.txt
StereoCM2.txt
StereoD1.txt
StereoD2.txt
StereoR.txt
StereoT.txt
StereoE.txt
StereoF.txt
**PARAMETERS**
calibration - is a tuple of the form (CM1, CM2, D1, D2, R, T, E, F)
CM1 -> Camera Matrix for left camera,
CM2 -> Camera Matrix for right camera,
D1 -> Vector of distortion coefficients for left camera,
D2 -> Vector of distortion coefficients for right camera,
R -> Rotation matrix between the left and the right camera coordinate systems,
T -> Translation vector between the left and the right coordinate systems of the cameras,
E -> Essential matrix,
F -> Fundamental matrix
**RETURNS**
return True on success and saves the calibration files.
**EXAMPLE**
>>> StereoCam = StereoCamera()
>>> calibration = StereoCam.stereoCalibration(1, 2, nboards=40)
>>> StereoCam.saveCalibration(calibration,fname="Stereo1")
"""
filenames = (fname+"CM1.txt", fname+"CM2.txt", fname+"D1.txt", fname+"D2.txt", fname+"R.txt", fname+"T.txt", fname+"E.txt", fname+"F.txt")
try :
(CM1, CM2, D1, D2, R, T, E, F) = calibration
cv.Save("{0}/{1}".format(cdir, filenames[0]), CM1)
cv.Save("{0}/{1}".format(cdir, filenames[1]), CM2)
cv.Save("{0}/{1}".format(cdir, filenames[2]), D1)
cv.Save("{0}/{1}".format(cdir, filenames[3]), D2)
cv.Save("{0}/{1}".format(cdir, filenames[4]), R)
cv.Save("{0}/{1}".format(cdir, filenames[5]), T)
cv.Save("{0}/{1}".format(cdir, filenames[6]), E)
cv.Save("{0}/{1}".format(cdir, filenames[7]), F)
print "Calibration parameters written to directory '{0}'.".format(cdir)
return True
except :
return False
def loadCalibration(self,fname="Stereo",dir="."):
"""
**SUMMARY**
loadCalibration is a method to load the StereoCalibration parameters such as CM1, CM2, D1, D2, R, T, E, F of stereo pair.
This method loads the calibration from files and returns the calibration on success, else returns False.
**PARAMETERS**
fname - is the prefix of the calibration files.
dir - is the directory in which files are present.
**RETURNS**
a tuple of the form (CM1, CM2, D1, D2, R, T, E, F) on success.
CM1 - Camera Matrix for left camera
CM2 - Camera Matrix for right camera
D1 - Vector of distortion coefficients for left camera
D2 - Vector of distortion coefficients for right camera
R - Rotation matrix between the left and the right camera coordinate systems
T - Translation vector between the left and the right coordinate systems of the cameras
E - Essential matrix
F - Fundamental matrix
else returns false
**EXAMPLE**
>>> StereoCam = StereoCamera()
>>> loadedCalibration = StereoCam.loadCalibration(fname="Stereo1")
"""
filenames = (fname+"CM1.txt", fname+"CM2.txt", fname+"D1.txt", fname+"D2.txt", fname+"R.txt", fname+"T.txt", fname+"E.txt", fname+"F.txt")
try :
CM1 = cv.Load("{0}/{1}".format(dir, filenames[0]))
CM2 = cv.Load("{0}/{1}".format(dir, filenames[1]))
D1 = cv.Load("{0}/{1}".format(dir, filenames[2]))
D2 = cv.Load("{0}/{1}".format(dir, filenames[3]))
R = cv.Load("{0}/{1}".format(dir, filenames[4]))
T = cv.Load("{0}/{1}".format(dir, filenames[5]))
E = cv.Load("{0}/{1}".format(dir, filenames[6]))
F = cv.Load("{0}/{1}".format(dir, filenames[7]))
print "Calibration files loaded from dir '{0}'.".format(dir)
return (CM1, CM2, D1, D2, R, T, E, F)
except :
return False
def stereoRectify(self,calib=None,WinSize=(352,288)):
"""
**SUMMARY**
Computes rectification transforms for each head of a calibrated stereo camera.
**PARAMETERS**
calibration - is a tuple of the form (CM1, CM2, D1, D2, R, T, E, F)
CM1 - Camera Matrix for left camera,
CM2 - Camera Matrix for right camera,
D1 - Vector of distortion coefficients for left camera,
D2 - Vector of distortion coefficients for right camera,
R - Rotation matrix between the left and the right camera coordinate systems,
T - Translation vector between the left and the right coordinate systems of the cameras,
E - Essential matrix,
F - Fundamental matrix
**RETURNS**
On success returns a tuple of the format -> (R1, R2, P1, P2, Q, roi)
R1 - Rectification transform (rotation matrix) for the left camera.
R2 - Rectification transform (rotation matrix) for the right camera.
P1 - Projection matrix in the new (rectified) coordinate systems for the left camera.
P2 - Projection matrix in the new (rectified) coordinate systems for the right camera.
Q - disparity-to-depth mapping matrix.
**EXAMPLE**
>>> StereoCam = StereoCamera()
>>> calibration = StereoCam.loadCalibration(fname="Stereo1")
>>> rectification = StereoCam.stereoRectify(calibration)
"""
(CM1, CM2, D1, D2, R, T, E, F) = calib
R1 = cv.CreateMat(3, 3, cv.CV_64F)
R2 = cv.CreateMat(3, 3, cv.CV_64F)
P1 = cv.CreateMat(3, 4, cv.CV_64F)
P2 = cv.CreateMat(3, 4, cv.CV_64F)
Q = cv.CreateMat(4, 4, cv.CV_64F)
print "Running stereo rectification..."
(leftroi, rightroi) = cv.StereoRectify(CM1, CM2, D1, D2, WinSize, R, T, R1, R2, P1, P2, Q)
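# keep only the intersection of the left and right valid-pixel regions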
roi = []
roi.append(max(leftroi[0], rightroi[0]))
roi.append(max(leftroi[1], rightroi[1]))
roi.append(min(leftroi[2], rightroi[2]))
roi.append(min(leftroi[3], rightroi[3]))
print "Done."
return (R1, R2, P1, P2, Q, roi)
def getImagesUndistort(self,imgLeft, imgRight, calibration, rectification, WinSize=(352,288)):
"""
**SUMMARY**
Rectify two images from the calibration and rectification parameters.
**PARAMETERS**
* *imgLeft* - Image captured from left camera and needs to be rectified.
* *imgRight* - Image captured from the right camera that needs to be rectified.
* *calibration* - A calibration tuple of the format (CM1, CM2, D1, D2, R, T, E, F)
* *rectification* - A rectification tuple of the format (R1, R2, P1, P2, Q, roi)
**RETURNS**
returns rectified images in a tuple -> (imgLeft, imgRight)
**EXAMPLE**
>>> StereoCam = StereoCamera()
>>> calibration = StereoCam.loadCalibration(fname="Stereo1")
>>> rectification = StereoCam.stereoRectify(calibration)
>>> imgLeft = camLeft.getImage()
>>> imgRight = camRight.getImage()
>>> rectLeft,rectRight = StereoCam.getImagesUndistort(imgLeft,imgRight,calibration,rectification)
"""
imgLeft = imgLeft.getMatrix()
imgRight = imgRight.getMatrix()
(CM1, CM2, D1, D2, R, T, E, F) = calibration
(R1, R2, P1, P2, Q, roi) = rectification
dst1 = cv.CloneMat(imgLeft)
dst2 = cv.CloneMat(imgRight)
map1x = cv.CreateMat(WinSize[1], WinSize[0], cv.CV_32FC1)
map2x = cv.CreateMat(WinSize[1], WinSize[0], cv.CV_32FC1)
map1y = cv.CreateMat(WinSize[1], WinSize[0], cv.CV_32FC1)
map2y = cv.CreateMat(WinSize[1], WinSize[0], cv.CV_32FC1)
#print "Rectifying images..."
cv.InitUndistortRectifyMap(CM1, D1, R1, P1, map1x, map1y)
cv.InitUndistortRectifyMap(CM2, D2, R2, P2, map2x, map2y)
cv.Remap(imgLeft, dst1, map1x, map1y)
cv.Remap(imgRight, dst2, map2x, map2y)
return Image(dst1), Image(dst2)
def get3DImage(self, leftIndex, rightIndex, Q, method="BM", state=None):
"""
**SUMMARY**
This method returns the 3D depth image using reprojectImageTo3D method.
**PARAMETERS**
* *leftIndex* - Index of left camera
* *rightIndex* - Index of right camera
* *Q* - reprojection Matrix (disparity to depth matrix)
* *method* - Stereo Correspondonce method to be used.
- "BM" - Stereo BM
- "SGBM" - Stereo SGBM
* *state* - dictionary corresponding to parameters of
stereo correspondonce.
SADWindowSize - odd int
nDisparity - int
minDisparity - int
preFilterCap - int
preFilterType - int (only BM)
speckleRange - int
speckleWindowSize - int
P1 - int (only SGBM)
P2 - int (only SGBM)
fullDP - Bool (only SGBM)
uniquenessRatio - int
textureThreshold - int (only BM)
**RETURNS**
SimpleCV.Image representing 3D depth Image
also StereoCamera.Image3D gives OpenCV 3D Depth Image of CV_32F type.
**EXAMPLE**
>>> stereo = StereoCamera()
>>> Q = cv.Load("Q.yml")
>>> stereo.get3DImage(1, 2, Q).show()
>>> state = {"SADWindowSize":9, "nDisparity":112, "minDisparity":-39}
>>> stereo.get3DImage(1, 2, Q, "BM", state).show()
>>> stereo.get3DImage(1, 2, Q, "SGBM", state).show()
"""
cv2flag = True
try:
import cv2
except ImportError:
cv2flag = False
import cv2.cv as cv
if cv2flag:
camLeft = cv2.VideoCapture(leftIndex)
camRight = cv2.VideoCapture(rightIndex)
if camLeft.isOpened():
_, imgLeft = camLeft.read()
else:
warnings.warn("Unable to open left camera")
return None
if camRight.isOpened():
_, imgRight = camRight.read()
else:
warnings.warn("Unable to open right camera")
return None
imgLeft = Image(imgLeft, cv2image=True)
imgRight = Image(imgRight, cv2image=True)
else:
camLeft = cv.CaptureFromCAM(leftIndex)
camRight = cv.CaptureFromCAM(rightIndex)
imgLeft = cv.QueryFrame(camLeft)
if imgLeft is None:
warnings.warn("Unable to open left camera")
return None
imgRight = cv.QueryFrame(camRight)
if imgRight is None:
warnings.warn("Unable to open right camera")
return None
imgLeft = Image(imgLeft)
imgRight = Image(imgRight)
del camLeft
del camRight
stereoImages = StereoImage(imgLeft, imgRight)
Image3D_normalize = stereoImages.get3DImage(Q, method, state)
self.Image3D = stereoImages.Image3D
return Image3D_normalize
class AVTCameraThread(threading.Thread):
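"""
Background helper thread: repeatedly triggers an acquisition on the wrapped
AVTCamera, pushes each decoded frame into the camera's rolling buffer and
keeps a rough frames-per-second count.
"""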
camera = None
run = True
verbose = False
lock = None
logger = None
framerate = 0
def __init__(self, camera):
super(AVTCameraThread, self).__init__()
self._stop = threading.Event()
self.camera = camera
self.lock = threading.Lock()
self.name = 'Thread-Camera-ID-' + str(self.camera.uniqueid)
def run(self):
counter = 0
timestamp = time.time()
while self.run:
self.lock.acquire()
self.camera.runCommand("AcquisitionStart")
frame = self.camera._getFrame(1000)
if frame:
img = Image(pil.fromstring(self.camera.imgformat,
(self.camera.width, self.camera.height),
frame.ImageBuffer[:int(frame.ImageBufferSize)]))
self.camera._buffer.appendleft(img)
self.camera.runCommand("AcquisitionStop")
self.lock.release()
counter += 1
time.sleep(0.01)
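# refresh the reported framerate roughly once per second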
if time.time() - timestamp >= 1:
self.camera.framerate = counter
counter = 0
timestamp = time.time()
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
AVTCameraErrors = [
("ePvErrSuccess", "No error"),
("ePvErrCameraFault", "Unexpected camera fault"),
("ePvErrInternalFault", "Unexpected fault in PvApi or driver"),
("ePvErrBadHandle", "Camera handle is invalid"),
("ePvErrBadParameter", "Bad parameter to API call"),
("ePvErrBadSequence", "Sequence of API calls is incorrect"),
("ePvErrNotFound", "Camera or attribute not found"),
("ePvErrAccessDenied", "Camera cannot be opened in the specified mode"),
("ePvErrUnplugged", "Camera was unplugged"),
("ePvErrInvalidSetup", "Setup is invalid (an attribute is invalid)"),
("ePvErrResources", "System/network resources or memory not available"),
("ePvErrBandwidth", "1394 bandwidth not available"),
("ePvErrQueueFull", "Too many frames on queue"),
("ePvErrBufferTooSmall", "Frame buffer is too small"),
("ePvErrCancelled", "Frame cancelled by user"),
("ePvErrDataLost", "The data for the frame was lost"),
("ePvErrDataMissing", "Some data in the frame is missing"),
("ePvErrTimeout", "Timeout during wait"),
("ePvErrOutOfRange", "Attribute value is out of the expected range"),
("ePvErrWrongType", "Attribute is not this type (wrong access function)"),
("ePvErrForbidden", "Attribute write forbidden at this time"),
("ePvErrUnavailable", "Attribute is not available at this time"),
("ePvErrFirewall", "A firewall is blocking the traffic (Windows only)"),
]
def pverr(errcode):
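"""Raise an Exception carrying the symbolic PvAPI error name and its description when errcode is nonzero."""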
if errcode:
raise Exception(": ".join(AVTCameraErrors[errcode]))
class AVTCamera(FrameSource):
"""
**SUMMARY**
AVTCamera is a ctypes wrapper for the Prosilica/Allied Vision cameras,
such as the "manta" series.
These require the PvAVT binary driver from Allied Vision:
http://www.alliedvisiontec.com/us/products/1108.html
Note that as of time of writing the new VIMBA driver is not available
for Mac/Linux - so this uses the legacy PvAVT driver.
Props to Cixelyn, whose py-avt-pvapi module showed how to get much
of this working https://bitbucket.org/Cixelyn/py-avt-pvapi
All camera properties are directly from the PvAVT manual -- if not
specified it will default to whatever the camera state is. Cameras
can either by
**EXAMPLE**
>>> cam = AVTCamera(0, {"width": 656, "height": 492})
>>>
>>> img = cam.getImage()
>>> img.show()
"""
_buffer = None # Buffer to store images
_buffersize = 10 # Number of images to keep in the rolling image buffer for threads
_lastimage = None # Last image loaded into memory
_thread = None
_framerate = 0
threaded = False
_pvinfo = { }
_properties = {
"AcqEndTriggerEvent": ("Enum", "R/W"),
"AcqEndTriggerMode": ("Enum", "R/W"),
"AcqRecTriggerEvent": ("Enum", "R/W"),
"AcqRecTriggerMode": ("Enum", "R/W"),
"AcqStartTriggerEvent": ("Enum", "R/W"),
"AcqStartTriggerMode": ("Enum", "R/W"),
"FrameRate": ("Float32", "R/W"),
"FrameStartTriggerDelay": ("Uint32", "R/W"),
"FrameStartTriggerEvent": ("Enum", "R/W"),
"FrameStartTriggerMode": ("Enum", "R/W"),
"FrameStartTriggerOverlap": ("Enum", "R/W"),
"AcquisitionFrameCount": ("Uint32", "R/W"),
"AcquisitionMode": ("Enum", "R/W"),
"RecorderPreEventCount": ("Uint32", "R/W"),
"ConfigFileIndex": ("Enum", "R/W"),
"ConfigFilePowerup": ("Enum", "R/W"),
"DSPSubregionBottom": ("Uint32", "R/W"),
"DSPSubregionLeft": ("Uint32", "R/W"),
"DSPSubregionRight": ("Uint32", "R/W"),
"DSPSubregionTop": ("Uint32", "R/W"),
"DefectMaskColumnEnable": ("Enum", "R/W"),
"ExposureAutoAdjustTol": ("Uint32", "R/W"),
"ExposureAutoAlg": ("Enum", "R/W"),
"ExposureAutoMax": ("Uint32", "R/W"),
"ExposureAutoMin": ("Uint32", "R/W"),
"ExposureAutoOutliers": ("Uint32", "R/W"),
"ExposureAutoRate": ("Uint32", "R/W"),
"ExposureAutoTarget": ("Uint32", "R/W"),
"ExposureMode": ("Enum", "R/W"),
"ExposureValue": ("Uint32", "R/W"),
"GainAutoAdjustTol": ("Uint32", "R/W"),
"GainAutoMax": ("Uint32", "R/W"),
"GainAutoMin": ("Uint32", "R/W"),
"GainAutoOutliers": ("Uint32", "R/W"),
"GainAutoRate": ("Uint32", "R/W"),
"GainAutoTarget": ("Uint32", "R/W"),
"GainMode": ("Enum", "R/W"),
"GainValue": ("Uint32", "R/W"),
"LensDriveCommand": ("Enum", "R/W"),
"LensDriveDuration": ("Uint32", "R/W"),
"LensVoltage": ("Uint32", "R/V"),
"LensVoltageControl": ("Uint32", "R/W"),
"IrisAutoTarget": ("Uint32", "R/W"),
"IrisMode": ("Enum", "R/W"),
"IrisVideoLevel": ("Uint32", "R/W"),
"IrisVideoLevelMax": ("Uint32", "R/W"),
"IrisVideoLevelMin": ("Uint32", "R/W"),
"VsubValue": ("Uint32", "R/C"),
"WhitebalAutoAdjustTol": ("Uint32", "R/W"),
"WhitebalAutoRate": ("Uint32", "R/W"),
"WhitebalMode": ("Enum", "R/W"),
"WhitebalValueRed": ("Uint32", "R/W"),
"WhitebalValueBlue": ("Uint32", "R/W"),
"EventAcquisitionStart": ("Uint32", "R/C 40000"),
"EventAcquisitionEnd": ("Uint32", "R/C 40001"),
"EventFrameTrigger": ("Uint32", "R/C 40002"),
"EventExposureEnd": ("Uint32", "R/C 40003"),
"EventAcquisitionRecordTrigger": ("Uint32", "R/C 40004"),
"EventSyncIn1Rise": ("Uint32", "R/C 40010"),
"EventSyncIn1Fall": ("Uint32", "R/C 40011"),
"EventSyncIn2Rise": ("Uint32", "R/C 40012"),
"EventSyncIn2Fall": ("Uint32", "R/C 40013"),
"EventSyncIn3Rise": ("Uint32", "R/C 40014"),
"EventSyncIn3Fall": ("Uint32", "R/C 40015"),
"EventSyncIn4Rise": ("Uint32", "R/C 40016"),
"EventSyncIn4Fall": ("Uint32", "R/C 40017"),
"EventOverflow": ("Uint32", "R/C 65534"),
"EventError": ("Uint32", "R/C"),
"EventNotification": ("Enum", "R/W"),
"EventSelector": ("Enum", "R/W"),
"EventsEnable1": ("Uint32", "R/W"),
"BandwidthCtrlMode": ("Enum", "R/W"),
"ChunkModeActive": ("Boolean", "R/W"),
"NonImagePayloadSize": ("Unit32", "R/V"),
"PayloadSize": ("Unit32", "R/V"),
"StreamBytesPerSecond": ("Uint32", "R/W"),
"StreamFrameRateConstrain": ("Boolean", "R/W"),
"StreamHoldCapacity": ("Uint32", "R/V"),
"StreamHoldEnable": ("Enum", "R/W"),
"TimeStampFrequency": ("Uint32", "R/C"),
"TimeStampValueHi": ("Uint32", "R/V"),
"TimeStampValueLo": ("Uint32", "R/V"),
"Height": ("Uint32", "R/W"),
"RegionX": ("Uint32", "R/W"),
"RegionY": ("Uint32", "R/W"),
"Width": ("Uint32", "R/W"),
"PixelFormat": ("Enum", "R/W"),
"TotalBytesPerFrame": ("Uint32", "R/V"),
"BinningX": ("Uint32", "R/W"),
"BinningY": ("Uint32", "R/W"),
"CameraName": ("String", "R/W"),
"DeviceFirmwareVersion": ("String", "R/C"),
"DeviceModelName": ("String", "R/W"),
"DevicePartNumber": ("String", "R/C"),
"DeviceSerialNumber": ("String", "R/C"),
"DeviceVendorName": ("String", "R/C"),
"FirmwareVerBuild": ("Uint32", "R/C"),
"FirmwareVerMajor": ("Uint32", "R/C"),
"FirmwareVerMinor": ("Uint32", "R/C"),
"PartClass": ("Uint32", "R/C"),
"PartNumber": ("Uint32", "R/C"),
"PartRevision": ("String", "R/C"),
"PartVersion": ("String", "R/C"),
"SerialNumber": ("String", "R/C"),
"SensorBits": ("Uint32", "R/C"),
"SensorHeight": ("Uint32", "R/C"),
"SensorType": ("Enum", "R/C"),
"SensorWidth": ("Uint32", "R/C"),
"UniqueID": ("Uint32", "R/C"),
"Strobe1ControlledDuration": ("Enum", "R/W"),
"Strobe1Delay": ("Uint32", "R/W"),
"Strobe1Duration": ("Uint32", "R/W"),
"Strobe1Mode": ("Enum", "R/W"),
"SyncIn1GlitchFilter": ("Uint32", "R/W"),
"SyncInLevels": ("Uint32", "R/V"),
"SyncOut1Invert": ("Enum", "R/W"),
"SyncOut1Mode": ("Enum", "R/W"),
"SyncOutGpoLevels": ("Uint32", "R/W"),
"DeviceEthAddress": ("String", "R/C"),
"HostEthAddress": ("String", "R/C"),
"DeviceIPAddress": ("String", "R/C"),
"HostIPAddress": ("String", "R/C"),
"GvcpRetries": ("Uint32", "R/W"),
"GvspLookbackWindow": ("Uint32", "R/W"),
"GvspResentPercent": ("Float32", "R/W"),
"GvspRetries": ("Uint32", "R/W"),
"GvspSocketBufferCount": ("Enum", "R/W"),
"GvspTimeout": ("Uint32", "R/W"),
"HeartbeatInterval": ("Uint32", "R/W"),
"HeartbeatTimeout": ("Uint32", "R/W"),
"MulticastEnable": ("Enum", "R/W"),
"MulticastIPAddress": ("String", "R/W"),
"PacketSize": ("Uint32", "R/W"),
"StatDriverType": ("Enum", "R/V"),
"StatFilterVersion": ("String", "R/C"),
"StatFrameRate": ("Float32", "R/V"),
"StatFramesCompleted": ("Uint32", "R/V"),
"StatFramesDropped": ("Uint32", "R/V"),
"StatPacketsErroneous": ("Uint32", "R/V"),
"StatPacketsMissed": ("Uint32", "R/V"),
"StatPacketsReceived": ("Uint32", "R/V"),
"StatPacketsRequested": ("Uint32", "R/V"),
"StatPacketResent": ("Uint32", "R/V")
}
class AVTCameraInfo(ct.Structure):
"""
AVTCameraInfo is an internal ctypes.Structure-derived class which
contains metadata about cameras on the local network.
Properties include:
* UniqueId
* CameraName
* ModelName
* PartNumber
* SerialNumber
* FirmwareVersion
* PermittedAccess
* InterfaceId
* InterfaceType
"""
_fields_ = [
("StructVer", ct.c_ulong),
("UniqueId", ct.c_ulong),
("CameraName", ct.c_char*32),
("ModelName", ct.c_char*32),
("PartNumber", ct.c_char*32),
("SerialNumber", ct.c_char*32),
("FirmwareVersion", ct.c_char*32),
("PermittedAccess", ct.c_long),
("InterfaceId", ct.c_ulong),
("InterfaceType", ct.c_int)
]
def __repr__(self):
return "<SimpleCV.Camera.AVTCameraInfo - UniqueId: %s>" % (self.UniqueId)
class AVTFrame(ct.Structure):
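"""ctypes structure describing a single PvAVT capture frame (image buffer plus acquisition metadata)."""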
_fields_ = [
("ImageBuffer", ct.POINTER(ct.c_char)),
("ImageBufferSize", ct.c_ulong),
("AncillaryBuffer", ct.c_int),
("AncillaryBufferSize", ct.c_int),
("Context", ct.c_int*4),
("_reserved1", ct.c_ulong*8),
("Status", ct.c_int),
("ImageSize", ct.c_ulong),
("AncillarySize", ct.c_ulong),
("Width", ct.c_ulong),
("Height", ct.c_ulong),
("RegionX", ct.c_ulong),
("RegionY", ct.c_ulong),
("Format", ct.c_int),
("BitDepth", ct.c_ulong),
("BayerPattern", ct.c_int),
("FrameCount", ct.c_ulong),
("TimestampLo", ct.c_ulong),
("TimestampHi", ct.c_ulong),
("_reserved2", ct.c_ulong*32)
]
def __init__(self, buffersize):
self.ImageBuffer = ct.create_string_buffer(buffersize)
self.ImageBufferSize = ct.c_ulong(buffersize)
self.AncillaryBuffer = 0
self.AncillaryBufferSize = 0
self.img = None
self.hasImage = False
self.frame = None
def __del__(self):
#This function should disconnect from the AVT Camera
pverr(self.dll.PvCameraClose(self.handle))
def __init__(self, camera_id = -1, properties = {}, threaded = False):
#~ super(AVTCamera, self).__init__()
import platform
if platform.system() == "Windows":
self.dll = ct.windll.LoadLibrary("PvAPI.dll")
elif platform.system() == "Darwin":
self.dll = ct.CDLL("libPvAPI.dylib", ct.RTLD_GLOBAL)
else:
self.dll = ct.CDLL("libPvAPI.so")
if not self._pvinfo.get("initialized", False):
self.dll.PvInitialize()
self._pvinfo['initialized'] = True
#initialize. Note that we rely on listAllCameras being the next
#call, since it blocks on cameras initializing
camlist = self.listAllCameras()
if not len(camlist):
raise Exception("Couldn't find any cameras with the PvAVT driver. Use SampleViewer to confirm you have one connected.")
if camera_id < 9000: #camera was passed as an index reference
if camera_id == -1: #accept -1 for "first camera"
camera_id = 0
camera_id = camlist[camera_id].UniqueId
camera_id = long(camera_id)
self.handle = ct.c_uint()
init_count = 0
while self.dll.PvCameraOpen(camera_id,0,ct.byref(self.handle)) != 0: #wait until camera is available
if init_count > 4: # Try to connect 5 times before giving up
raise Exception('Could not connect to camera, please verify with SampleViewer you can connect')
init_count += 1
time.sleep(1) # sleep and retry to connect to camera in a second
pverr(self.dll.PvCaptureStart(self.handle))
self.uniqueid = camera_id
self.setProperty("AcquisitionMode","SingleFrame")
self.setProperty("FrameStartTriggerMode","Freerun")
if properties.get("mode", "RGB") == 'gray':
self.setProperty("PixelFormat", "Mono8")
else:
self.setProperty("PixelFormat", "Rgb24")
#give some compatibility with other cameras
if properties.get("mode", ""):
properties.pop("mode")
if properties.get("height", ""):
properties["Height"] = properties["height"]
properties.pop("height")
if properties.get("width", ""):
properties["Width"] = properties["width"]
properties.pop("width")
for p in properties:
self.setProperty(p, properties[p])
if threaded:
self._thread = AVTCameraThread(self)
self._thread.daemon = True
self._buffer = deque(maxlen=self._buffersize)
self._thread.start()
self.threaded = True
self.frame = None
self._refreshFrameStats()
def restart(self):
"""
This tries to restart the camera thread
"""
self._thread.stop()
self._thread = AVTCameraThread(self)
self._thread.daemon = True
self._buffer = deque(maxlen=self._buffersize)
self._thread.start()
def listAllCameras(self):
"""
**SUMMARY**
List all cameras attached to the host
**RETURNS**
List of AVTCameraInfo objects, otherwise empty list
"""
camlist = (self.AVTCameraInfo*100)()
starttime = time.time()
while int(camlist[0].UniqueId) == 0 and time.time() - starttime < 10:
self.dll.PvCameraListEx(ct.byref(camlist), 100, None, ct.sizeof(self.AVTCameraInfo))
time.sleep(0.1) #keep checking for cameras until timeout
return [cam for cam in camlist if cam.UniqueId != 0]
def runCommand(self,command):
"""
**SUMMARY**
Runs a PvAVT Command on the camera
Valid Commands include:
* FrameStartTriggerSoftware
* AcquisitionAbort
* AcquisitionStart
* AcquisitionStop
* ConfigFileLoad
* ConfigFileSave
* TimeStampReset
* TimeStampValueLatch
**RETURNS**
0 on success
**EXAMPLE**
>>> c = AVTCamera()
>>> c.runCommand("TimeStampReset")
"""
return self.dll.PvCommandRun(self.handle,command)
def getProperty(self, name):
"""
**SUMMARY**
This retrieves the value of the AVT Camera attribute
There are around 140 properties for the AVT Camera, so reference the
AVT Camera and Driver Attributes pdf that is provided with
the driver for detailed information
Note that the error codes are currently ignored, so empty values
may be returned.
**EXAMPLE**
>>> c = AVTCamera()
>>> print c.getProperty("ExposureValue")
"""
valtype, perm = self._properties.get(name, (None, None))
if not valtype:
return None
val = ''
err = 0
if valtype == "Enum":
val = ct.create_string_buffer(100)
vallen = ct.c_long()
err = self.dll.PvAttrEnumGet(self.handle, name, val, 100, ct.byref(vallen))
val = str(val[:vallen.value])
elif valtype == "Uint32":
val = ct.c_uint()
err = self.dll.PvAttrUint32Get(self.handle, name, ct.byref(val))
val = int(val.value)
elif valtype == "Float32":
val = ct.c_float()
err = self.dll.PvAttrFloat32Get(self.handle, name, ct.byref(val))
val = float(val.value)
elif valtype == "String":
val = ct.create_string_buffer(100)
vallen = ct.c_long()
err = self.dll.PvAttrStringGet(self.handle, name, val, 100, ct.byref(vallen))
val = str(val[:vallen.value])
elif valtype == "Boolean":
val = ct.c_bool()
err = self.dll.PvAttrBooleanGet(self.handle, name, ct.byref(val))
val = bool(val.value)
#TODO, handle error codes
return val
#TODO, implement the PvAttrRange* functions
#def getPropertyRange(self, name)
def getAllProperties(self):
"""
**SUMMARY**
This returns a dict with the name and current value of the
documented PvAVT attributes
CAVEAT: it addresses each of the properties individually, so
this may take time to run if there's network latency
**EXAMPLE**
>>> c = AVTCamera(0)
>>> props = c.getAllProperties()
>>> print props['ExposureValue']
"""
props = {}
for p in self._properties.keys():
props[p] = self.getProperty(p)
return props
def setProperty(self, name, value, skip_buffer_size_check=False):
"""
**SUMMARY**
This sets the value of the AVT Camera attribute.
There are around 140 properties for the AVT Camera, so reference the
AVT Camera and Driver Attributes pdf that is provided with
the driver for detailed information
By default, we will also refresh the height/width and bytes per
frame we're expecting -- you can manually bypass this if you want speed
Returns the raw PvAVT error code (0 = success)
**Example**
>>> c = AVTCamera()
>>> c.setProperty("ExposureValue", 30000)
>>> c.getImage().show()
"""
valtype, perm = self._properties.get(name, (None, None))
if not valtype:
return None
if valtype == "Uint32":
err = self.dll.PvAttrUint32Set(self.handle, name, ct.c_uint(int(value)))
elif valtype == "Float32":
err = self.dll.PvAttrFloat32Set(self.handle, name, ct.c_float(float(value)))
elif valtype == "Enum":
err = self.dll.PvAttrEnumSet(self.handle, name, str(value))
elif valtype == "String":
err = self.dll.PvAttrStringSet(self.handle, name, str(value))
elif valtype == "Boolean":
err = self.dll.PvAttrBooleanSet(self.handle, name, ct.c_bool(bool(value)))
#just to be safe, re-cache the camera metadata
if not skip_buffer_size_check:
self._refreshFrameStats()
return err
def getImage(self, timeout = 5000, hwtrigger = False):
"""
**SUMMARY**
Extract an Image from the Camera, returning the value. No matter
what the image characteristics on the camera, the Image returned
will be RGB 8 bit depth, if camera is in greyscale mode it will
be 3 identical channels.
**EXAMPLE**
>>> c = AVTCamera()
>>> c.getImage().show()
"""
if self.frame is not None:
st = time.time()
try:
pverr( self.dll.PvCaptureWaitForFrameDone(self.handle, ct.byref(self.frame), timeout) )
except Exception, e:
print "Exception waiting for frame:", e
print "Time taken:",time.time() - st
self.frame = None
raise(e)
img = self.unbuffer()
self.frame = None
return img
elif self.threaded:
self._thread.lock.acquire()
try:
img = self._buffer.pop()
self._lastimage = img
except IndexError:
img = self._lastimage
self._thread.lock.release()
else:
self.runCommand("AcquisitionStart")
frame = self._getFrame(timeout,hwtrigger)
img = Image(pil.fromstring(self.imgformat,
(self.width, self.height),
frame.ImageBuffer[:int(frame.ImageBufferSize)]))
self.runCommand("AcquisitionStop")
return img
def setupASyncMode(self):
self.setProperty('AcquisitionMode','SingleFrame')
self.setProperty('FrameStartTriggerMode','Software')
def setupSyncMode(self):
self.setProperty('AcquisitionMode','Continuous')
self.setProperty('FrameStartTriggerMode','FreeRun')
def unbuffer(self):
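#decode the raw frame buffer currently held in self.frame into a SimpleCV Image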
img = Image(pil.fromstring(self.imgformat,
(self.width, self.height),
self.frame.ImageBuffer[:int(self.frame.ImageBufferSize)]))
return img
def _refreshFrameStats(self):
self.width = self.getProperty("Width")
self.height = self.getProperty("Height")
self.buffersize = self.getProperty("TotalBytesPerFrame")
self.pixelformat = self.getProperty("PixelFormat")
self.imgformat = 'RGB'
if self.pixelformat == 'Mono8':
self.imgformat = 'L'
def _getFrame(self, timeout = 5000, hwtrigger = False):
#return the AVTFrame object from the camera, timeout in ms
#need to multiply by bitdepth
if hwtrigger == False:
try:
frame = self.AVTFrame(self.buffersize)
pverr( self.dll.PvCaptureQueueFrame(self.handle, ct.byref(frame), None) )
st = time.time()
try:
pverr( self.dll.PvCaptureWaitForFrameDone(self.handle, ct.byref(frame), timeout) )
except Exception, e:
print "Exception waiting for frame:", e
print "Time taken:",time.time() - st
raise(e)
except Exception, e:
print "Exception aquiring frame:", e
raise(e)
else:
try:
frame = self.AVTFrame(self.buffersize)
pverr( self.dll.PvCaptureQueueFrame(self.handle, ct.byref(frame), None) )
st = time.time()
try:
while self.dll.PvCaptureWaitForFrameDone(self.handle, ct.byref(frame), timeout) == 17: #timeout error
print ("waiting trigger")
except Exception, e:
print "Exception waiting for frame:", e
print "Time taken:",time.time() - st
raise(e)
except Exception, e:
print "Exception aquiring frame:", e
raise(e)
return frame
def acquire(self):
self.frame = self.AVTFrame(self.buffersize)
try:
self.runCommand("AcquisitionStart")
pverr( self.dll.PvCaptureQueueFrame(self.handle, ct.byref(self.frame), None) )
self.runCommand("AcquisitionStop")
except Exception, e:
print "Exception aquiring frame:", e
raise(e)
class GigECamera(Camera):
"""
GigE Camera driver via Aravis
"""
def __init__(self, camera_id = None, properties = {}, threaded = False):
try:
from gi.repository import Aravis
except:
print "GigE is supported by the Aravis library, download and build from https://github.com/sightmachine/aravis"
print "Note that you need to set GI_TYPELIB_PATH=$GI_TYPELIB_PATH:(PATH_TO_ARAVIS)/src for the GObject Introspection"
sys.exit()
self._cam = Aravis.Camera.new (None)
self._pixel_mode = "RGB"
if properties.get("mode", False):
self._pixel_mode = properties.pop("mode")
if self._pixel_mode == "gray":
self._cam.set_pixel_format (Aravis.PIXEL_FORMAT_MONO_8)
else:
self._cam.set_pixel_format (Aravis.PIXEL_FORMAT_BAYER_BG_8) #we'll use bayer (basler cams)
#TODO, deal with other pixel formats
if properties.get("roi", False):
roi = properties['roi']
self._cam.set_region(*roi)
#TODO, check sensor size
if properties.get("width", False):
#TODO, set internal function to scale results of getimage
pass
if properties.get("framerate", False):
self._cam.set_frame_rate(properties['framerate'])
self._stream = self._cam.create_stream (None, None)
payload = self._cam.get_payload()
self._stream.push_buffer(Aravis.Buffer.new_allocate (payload))
[x,y,width,height] = self._cam.get_region ()
self._height, self._width = height, width
def getImage(self):
camera = self._cam
camera.start_acquisition()
buff = self._stream.pop_buffer()
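# the buffer timestamp is in nanoseconds; store the capture time in milliseconds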
self.capturetime = buff.timestamp_ns / 1000000.0
img = np.fromstring(ct.string_at(buff.data_address(), buff.size), dtype = np.uint8).reshape(self._height, self._width)
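# demosaic the raw Bayer buffer into a BGR image (assumes the Bayer pixel format set above)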
rgb = cv2.cvtColor(img, cv2.COLOR_BAYER_BG2BGR)
self._stream.push_buffer(buff)
camera.stop_acquisition()
#TODO, we should handle software triggering (separate capture and get image events)
return Image(rgb)
def getPropertyList(self):
l = [
'available_pixel_formats',
'available_pixel_formats_as_display_names',
'available_pixel_formats_as_strings',
'binning',
'device_id',
'exposure_time',
'exposure_time_bounds',
'frame_rate',
'frame_rate_bounds',
'gain',
'gain_bounds',
'height_bounds',
'model_name',
'payload',
'pixel_format',
'pixel_format_as_string',
'region',
'sensor_size',
'trigger_source',
'vendor_name',
'width_bounds'
]
return l
def getProperty(self, name = None):
'''
This function gets the properties available to the camera
Usage:
> camera.getProperty('region')
> (0, 0, 128, 128)
Available Properties:
see function camera.getPropertyList()
'''
if name is None:
print "You need to provide a property, available properties are:"
print ""
for p in self.getPropertyList():
print p
return
stringval = "get_{}".format(name)
try:
return getattr(self._cam, stringval)()
except:
print 'Property {} does not appear to exist'.format(name)
return None
def setProperty(self, name = None, *args):
'''
This function sets the property available to the camera
Usage:
> camera.setProperty('region',(256,256))
Available Properties:
see function camera.getPropertyList()
'''
if name is None:
print "You need to provide a property, available properties are:"
print ""
for p in self.getPropertyList():
print p
return
if len(args) <= 0:
print "You must provide a value to set"
return
stringval = "set_{}".format(name)
try:
return getattr(self._cam, stringval)(*args)
except:
print 'Property {} does not appear to exist or value is not in correct format'.format(name)
return None
def getAllProperties(self):
'''
This function just prints out all the properties available to the camera
'''
for p in self.getPropertyList():
print "{}: {}".format(p,self.getProperty(p))
class VimbaCameraThread(threading.Thread):
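"""
Background helper thread: repeatedly captures frames from the wrapped
VimbaCamera into its rolling buffer and keeps a rough frames-per-second count.
"""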
camera = None
run = True
verbose = False
lock = None
logger = None
framerate = 0
def __init__(self, camera):
super(VimbaCameraThread, self).__init__()
self._stop = threading.Event()
self.camera = camera
self.lock = threading.Lock()
self.name = 'Thread-Camera-ID-' + str(self.camera.uniqueid)
def run(self):
counter = 0
timestamp = time.time()
while self.run:
self.lock.acquire()
img = self.camera._captureFrame(1000)
self.camera._buffer.appendleft(img)
self.lock.release()
counter += 1
time.sleep(0.01)
if time.time() - timestamp >= 1:
self.camera.framerate = counter
counter = 0
timestamp = time.time()
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
class VimbaCamera(FrameSource):
"""
**SUMMARY**
VimbaCamera is a wrapper for the Allied Vision cameras,
such as the "manta" series.
This requires the
1) Vimba SDK provided from Allied Vision
http://www.alliedvisiontec.com/us/products/software/vimba-sdk.html
2) Pyvimba Python library
TODO: <INSERT URL>
Note that as of time of writing, the VIMBA driver is not available
for Mac.
All camera properties are directly from the Vimba SDK manual -- if not
specified, it will default to whatever the camera state is. Cameras
can be run either unthreaded (the default) or threaded; see the
'threaded' constructor argument below.
**EXAMPLE**
>>> cam = VimbaCamera(0, {"width": 656, "height": 492})
>>>
>>> img = cam.getImage()
>>> img.show()
"""
def _setupVimba(self):
from pymba import Vimba
self._vimba = Vimba()
self._vimba.startup()
system = self._vimba.getSystem()
if system.GeVTLIsPresent:
system.runFeatureCommand("GeVDiscoveryAllOnce")
time.sleep(0.2)
def __del__(self):
#This function should disconnect from the Vimba Camera
if self._camera is not None:
if self.threaded:
self._thread.stop()
time.sleep(0.2)
if self._frame is not None:
self._frame.revokeFrame()
self._frame = None
self._camera.closeCamera()
self._vimba.shutdown()
def shutdown(self):
"""You must call this function if you are using threaded=true when you are finished
to prevent segmentation fault"""
# REQUIRED TO PREVENT SEGMENTATION FAULT FOR THREADED=True
if (self._camera):
self._camera.closeCamera()
self._vimba.shutdown()
def __init__(self, camera_id = -1, properties = {}, threaded = False):
if not VIMBA_ENABLED:
raise Exception("You don't seem to have the pymba library installed. This will make it hard to use a AVT Vimba Camera.")
self._vimba = None
self._setupVimba()
camlist = self.listAllCameras()
self._camTable = {}
self._frame = None
self._buffer = None # Buffer to store images
self._buffersize = 10 # Number of images to keep in the rolling image buffer for threads
self._lastimage = None # Last image loaded into memory
self._thread = None
self._framerate = 0
self.threaded = False
self._properties = {}
self._camera = None
i = 0
for cam in camlist:
self._camTable[i] = {'id': cam.cameraIdString}
i += 1
if not len(camlist):
raise Exception("Couldn't find any cameras with the Vimba driver. Use VimbaViewer to confirm you have one connected.")
if camera_id < 9000: #camera was passed as an index reference
if camera_id == -1: #accept -1 for "first camera"
camera_id = 0
if camera_id >= len(camlist):
raise Exception("Couldn't find camera at index %d." % camera_id)
cam_guid = camlist[camera_id].cameraIdString
else:
raise Exception("Index %d is too large" % camera_id)
self._camera = self._vimba.getCamera(cam_guid)
self._camera.openCamera()
self.uniqueid = cam_guid
self.setProperty("AcquisitionMode","SingleFrame")
self.setProperty("TriggerSource","Freerun")
# TODO: FIX
if properties.get("mode", "RGB") == 'gray':
self.setProperty("PixelFormat", "Mono8")
else:
fmt = "RGB8Packed" # alternatively use BayerRG8
self.setProperty("PixelFormat", "BayerRG8")
#give some compatibility with other cameras
if properties.get("mode", ""):
properties.pop("mode")
if properties.get("height", ""):
properties["Height"] = properties["height"]
properties.pop("height")
if properties.get("width", ""):
properties["Width"] = properties["width"]
properties.pop("width")
for p in properties:
self.setProperty(p, properties[p])
if threaded:
self._thread = VimbaCameraThread(self)
self._thread.daemon = True
self._buffer = deque(maxlen=self._buffersize)
self._thread.start()
self.threaded = True
self._refreshFrameStats()
def restart(self):
"""
This tries to restart the camera thread
"""
self._thread.stop()
self._thread = VimbaCameraThread(self)
self._thread.daemon = True
self._buffer = deque(maxlen=self._buffersize)
self._thread.start()
def listAllCameras(self):
"""
**SUMMARY**
List all cameras attached to the host
**RETURNS**
List of VimbaCamera objects, otherwise empty list
VimbaCamera objects are defined in the pymba module
"""
cameraIds = self._vimba.getCameraIds()
ar = []
for cameraId in cameraIds:
ar.append(self._vimba.getCamera(cameraId))
return ar
def runCommand(self,command):
"""
**SUMMARY**
Runs a Vimba Command on the camera
Valid Commands include:
* AcquisitionAbort
* AcquisitionStart
* AcquisitionStop
**RETURNS**
0 on success
**EXAMPLE**
>>>c = VimbaCamera()
>>>c.runCommand("TimeStampReset")
"""
return self._camera.runFeatureCommand(command)
def getProperty(self, name):
"""
**SUMMARY**
This retrieves the value of the Vimba Camera attribute
There are around 140 properties for the Vimba Camera, so reference the
Vimba Camera pdf that is provided with
the SDK for detailed information
Throws VimbaException if property is not found or not implemented yet.
**EXAMPLE**
>>>c = VimbaCamera()
>>>print c.getProperty("ExposureMode")
"""
return self._camera.__getattr__(name)
#TODO, implement the PvAttrRange* functions
#def getPropertyRange(self, name)
def getAllProperties(self):
"""
**SUMMARY**
This returns a dict with the name and current value of the
documented Vimba attributes
CAVEAT: it addresses each of the properties individually, so
this may take time to run if there's network latency
**EXAMPLE**
>>>c = VimbaCamera(0)
>>>props = c.getAllProperties()
>>>print props['ExposureMode']
"""
from pymba import VimbaException
# TODO
ar = {}
c = self._camera
cameraFeatureNames = c.getFeatureNames()
for name in cameraFeatureNames:
try:
ar[name] = c.__getattr__(name)
except VimbaException:
# Ignore features not yet implemented
pass
return ar
def setProperty(self, name, value, skip_buffer_size_check=False):
"""
**SUMMARY**
This sets the value of the Vimba Camera attribute.
There are around 140 properties for the Vimba Camera, so reference the
Vimba Camera pdf that is provided with
the SDK for detailed information
Throws VimbaException if property not found or not yet implemented
**Example**
>>>c = VimbaCamera()
>>>c.setProperty("ExposureAutoRate", 200)
>>>c.getImage().show()
"""
ret = self._camera.__setattr__(name, value)
#just to be safe, re-cache the camera metadata
if not skip_buffer_size_check:
self._refreshFrameStats()
return ret
def getImage(self):
"""
**SUMMARY**
Extract an Image from the camera and return it. Regardless of the
camera's pixel format, the returned Image is RGB at 8-bit depth; if
the camera is in greyscale mode it will contain 3 identical channels.
**EXAMPLE**
>>>c = VimbaCamera()
>>>c.getImage().show()
"""
if self.threaded:
self._thread.lock.acquire()
try:
img = self._buffer.pop()
self._lastimage = img
except IndexError:
img = self._lastimage
self._thread.lock.release()
else:
img = self._captureFrame()
return img
def setupASyncMode(self):
self.setProperty('AcquisitionMode','SingleFrame')
self.setProperty('TriggerSource','Software')
def setupSyncMode(self):
self.setProperty('AcquisitionMode','SingleFrame')
self.setProperty('TriggerSource','Freerun')
def _refreshFrameStats(self):
self.width = self.getProperty("Width")
self.height = self.getProperty("Height")
self.pixelformat = self.getProperty("PixelFormat")
self.imgformat = 'RGB'
if self.pixelformat == 'Mono8':
self.imgformat = 'L'
def _getFrame(self):
if not self._frame:
self._frame = self._camera.getFrame() # creates a frame
self._frame.announceFrame()
return self._frame
def _captureFrame(self, timeout = 5000):
try:
c = self._camera
f = self._getFrame()
colorSpace = ColorSpace.BGR
if self.pixelformat == 'Mono8':
colorSpace = ColorSpace.GRAY
c.startCapture()
f.queueFrameCapture()
c.runFeatureCommand('AcquisitionStart')
c.runFeatureCommand('AcquisitionStop')
try:
f.waitFrameCapture(timeout)
except Exception, e:
print "Exception waiting for frame: %s: %s" % (e, traceback.format_exc())
raise(e)
imgData = f.getBufferByteData()
moreUsefulImgData = np.ndarray(buffer = imgData,
dtype = np.uint8,
shape = (f.height, f.width, 1))
rgb = cv2.cvtColor(moreUsefulImgData, cv2.COLOR_BAYER_RG2RGB)
c.endCapture()
return Image(rgb, colorSpace=colorSpace, cv2image=imgData)
except Exception, e:
print "Exception acquiring frame: %s: %s" % (e, traceback.format_exc())
raise(e)
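# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# A hedged example of the threaded VimbaCamera workflow documented in the class
# docstring above; it assumes pymba is installed and an AVT camera is attached.
# The camera index 0 and the 0.5 second warm-up are arbitrary illustrative choices.
if __name__ == "__main__":
    cam = VimbaCamera(0, threaded=True)
    try:
        time.sleep(0.5)        # give the capture thread time to fill the buffer
        img = cam.getImage()   # newest frame from the rolling deque
        img.show()
    finally:
        cam.shutdown()         # required when threaded=True (see docstring above)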
| beni55/SimpleCV | SimpleCV/Camera.py | Python | bsd-3-clause | 132,863 | ["VisIt"] | cd14e8ec8436bc4ddb03a88456916517d75d26e7323a0038672f7942bde7a5d3 |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 17 14:10:58 2017
@author: tkc
"""
import os
import pandas as pd
import numpy as np
from tkinter import filedialog
AESQUANTPARAMFILE='C:\\Users\\tkc\\Documents\\Python_Scripts\\Augerquant\\Params\\AESquantparams.csv'
class AESspectrum():
''' Single instance of AES spectra file created from row of spelist (child of AESdataset)
load file from AESdataset (pd dataframe row)
#TODO add direct file load? '''
def __init__(self, AESdataset, rowindex):
# can be opened with AESdataset parent and associated row from
# open files from directory arg
self.AESdataset=AESdataset
self.path=self.AESdataset.path # same path as AESdataset parent
# load params from batch processing of AESdataset
row=AESdataset.Augerparamlog.iloc[rowindex]
self.filename=row.Filename
self.numareas=row.Numareas
self.evbreaks=row.Evbreaks # TODO data type?
self.spectype = row.Type.lower() # multiplex or survey
self.AESdf = None # entire AES dataframe (all areas)
self.energy = None # same for all cols
self.aesquantparams = None
self.loadAESquantparams()
self.elems_smdiff = None
self.get_elems_smdiff() # get quant from existing smdifpeakslog
self.elems_integ = None #
self.elemdata = None
self.getelemdata()
print('Auger QM file', self.filename, 'loaded.')
def open_csvfile(self):
''' Read Auger spectral file '''
self.AESdf=pd.read_csv(self.filename.replace('.spe','.csv'))
self.colset=self.AESdf.columns # Counts1, Counts2, S7D71, S7D72, etc.
self.energy=self.AESdf['Energy']
self.backfit=self.AESdf['Backfit']
self.subdata=self.AESdf['Subdata']
print('AES file ', self.filename,' loaded.')
def loadAESquantparams(self):
''' Loads standard values of Auger quant parameters
TODO what about dealing with local shifts '''
# Checkbutton option for local (or standard) AESquantparams in file loader?
print('AESquantparams loaded')
self.aesquantparams=pd.read_csv(AESQUANTPARAMFILE, encoding='utf-8')
def get_elems_smdiff(self):
''' Finds element quant already performed from smdifflog (within AESdataset) '''
match=self.AESdataset.Smdifpeakslog.loc[ (self.AESdataset.Smdifpeakslog['Filename']==self.filename)]
# should contain row for each element included in quant
self.elems_smdiff=[] # elem/ peak name
self.smdiff_shifts=[] # shifts from ideal position
self.smdiff_ampl=[] # negintensity - posintensity
self.smdiff_widths=[] # ev diff between negpeak and pospeak
for index, row in match.iterrows():
self.elems_smdiff.append(row.PeakID)
self.smdiff_shifts.append(row.Shift)
self.smdiff_ampl.append(row.Amplitude)
self.smdiff_widths.append(row.Peakwidth)
def get_elems_integ(self):
''' Pull existing quant results from integ log file (if present) '''
pass
def savecsv(self):
''' Save any changes to underlying csv file '''
class AESdataset():
''' loads all dataframes with Auger parameters from current project folder '''
def __init__(self, *args, **kwargs):
self.path = filedialog.askdirectory()
# open files
self.open_main_files()
self.filelist=np.ndarray.tolist(self.Augerparamlog.Filenumber.unique())
self.numfiles=len(self.Augerparamlog)
print(str(self.numfiles),' files loaded into AESdataset.')
def open_main_files(self):
''' Auto loads Auger param files from working directory including
Augerparamlog - assorted params associated w/ each Auger spe file
Smdifpeakslog - peak amplitudes, shifts and widths from smooth-differentiated quant
Backfitlog - ranges and parameters for background fits
Integquantlog - subtracted and corrected counts for chosen elements'''
if os.path.exists('Augerparamlog.csv'):
self.Augerparamlog=pd.read_csv('Augerparamlog.csv', encoding='cp437')
self.spelist=self.Augerparamlog[pd.notnull(self.Augerparamlog['Areas'])]
else:
self.Augerparamlog=pd.DataFrame()
self.spelist=pd.DataFrame()
if os.path.exists('Smdifpeakslog.csv'):
self.Smdifpeakslog=pd.read_csv('Smdifpeakslog.csv', encoding='cp437')
else:
self.Smdifpeakslog=pd.DataFrame()
if os.path.exists('Backfitlog.csv'):
self.Backfitlog=pd.read_csv('Backfitlog.csv', encoding='cp437')
else:
self.Backfitlog=pd.DataFrame()
if os.path.exists('Integquantlog.csv'):
self.Integquantlog=pd.read_csv('Integquantlog.csv', encoding='cp437')
else:
self.Integquantlog=pd.DataFrame()
# Load standard AES quant params (same default file as AESQUANTPARAMFILE above)
try:
self.AESquantparams=pd.read_csv(AESQUANTPARAMFILE, encoding='utf-8')
except Exception:
self.AESquantparams=pd.DataFrame()
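# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# How the two classes above are intended to fit together, assuming the project
# folder chosen in the tkinter dialog contains the csv logs listed in
# open_main_files() and that the remaining AESspectrum helpers (e.g. getelemdata)
# are defined elsewhere in the project. Row index 0 is an arbitrary example.
#
#   dataset = AESdataset()               # folder picker, then loads the log csv files
#   spectrum = AESspectrum(dataset, 0)   # wrap the first row of Augerparamlog
#   spectrum.open_csvfile()              # energy/counts columns for that spe file
#   print(spectrum.elems_smdiff, spectrum.smdiff_ampl)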
| tkcroat/Augerquant | Development/AES_dataset_class.py | Python | mit | 5,287 | ["Gaussian"] | d492c0a47926cd683040a073e46118656e32626b35acbf26f816767aa4324764 |
"""
Tests for intensity combination.
"""
from __future__ import annotations
from unittest.mock import Mock
import pytest
from dxtbx.model import Crystal, Experiment
from dials.algorithms.scaling.combine_intensities import (
MultiDatasetIntensityCombiner,
SingleDatasetIntensityCombiner,
combine_intensities,
)
from dials.algorithms.scaling.scaling_utilities import calculate_prescaling_correction
from dials.array_family import flex
@pytest.fixture(scope="module")
def test_exp_P1():
"""Create a mock experiments object."""
exp = Experiment()
exp_dict = {
"__id__": "crystal",
"real_space_a": [1.0, 0.0, 0.0],
"real_space_b": [0.0, 1.0, 0.0],
"real_space_c": [0.0, 0.0, 1.0],
"space_group_hall_symbol": " P 1",
}
crystal = Crystal.from_dict(exp_dict)
exp.crystal = crystal
return exp
def generate_simple_table(prf=True):
"""Generate a reflection table for testing intensity combination.
The numbers are contrived to make sum intensities agree well at high
intensity but terribly at low and vice versa for profile intensities."""
reflections = flex.reflection_table()
reflections["miller_index"] = flex.miller_index(
[
(0, 0, 1),
(0, 0, 1),
(0, 0, 1),
(0, 0, 1),
(0, 0, 1),
(0, 0, 2),
(0, 0, 2),
(0, 0, 2),
(0, 0, 2),
(0, 0, 2),
(0, 0, 3),
(0, 0, 3),
(0, 0, 3),
(0, 0, 3),
(0, 0, 3),
(0, 0, 4),
(0, 0, 4),
(0, 0, 4),
(0, 0, 4),
(0, 0, 4),
(0, 0, 5),
(0, 0, 5),
(0, 0, 5),
(0, 0, 5),
(0, 0, 5),
]
)
reflections["inverse_scale_factor"] = flex.double(25, 1.0)
# Contrive an example that should give the best cc12 when combined.
# make sum intensities agree well at high intensity but terribly at low
# and vice versa for profile intensities.
# profile less consistent at high intensity here
# sum less consistent at low intensity here
reflections["intensity.sum.value"] = flex.double(
[
10000.0,
11000.0,
9000.0,
8000.0,
12000.0,
500.0,
5600.0,
5500.0,
2000.0,
6000.0,
100.0,
50.0,
150.0,
75.0,
125.0,
30.0,
10.0,
2.0,
35.0,
79.0,
1.0,
10.0,
20.0,
10.0,
5.0,
]
)
reflections["intensity.sum.variance"] = flex.double(
[10000] * 5 + [5000] * 5 + [100] * 5 + [30] * 5 + [10] * 5
)
reflections.set_flags(flex.bool(25, False), reflections.flags.outlier_in_scaling)
reflections.set_flags(flex.bool(25, True), reflections.flags.integrated)
reflections["lp"] = flex.double(25, 0.5)
if prf:
reflections["intensity.prf.value"] = flex.double(
[
10000.0,
16000.0,
12000.0,
6000.0,
9000.0,
5000.0,
2000.0,
1500.0,
1300.0,
9000.0,
100.0,
80.0,
120.0,
90.0,
100.0,
30.0,
40.0,
50.0,
30.0,
30.0,
10.0,
12.0,
9.0,
8.0,
10.0,
]
)
reflections["intensity.prf.variance"] = flex.double(
[1000] * 5 + [500] * 5 + [10] * 5 + [3] * 5 + [1] * 5
)
reflections = calculate_prescaling_correction(reflections)
return reflections
def test_combine_intensities_prf_sum(test_exp_P1):
reflections = flex.reflection_table()
reflections["intensity.sum.value"] = flex.double([100.0, 100.0, 100.0, 100.0])
reflections["intensity.prf.value"] = flex.double([200.0, 200.0, 200.0, 200.0])
reflections["intensity.sum.variance"] = flex.double(4, 100)
reflections["intensity.prf.variance"] = flex.double(4, 200)
reflections["prescaling_correction"] = flex.double(4, 1.0)
reflections.set_flags(
flex.bool([False, False, True, True]), reflections.flags.integrated_prf
)
reflections.set_flags(
flex.bool([True, False, False, True]), reflections.flags.integrated_sum
)
intensities, _ = combine_intensities(reflections, Imid=100.0)
# if prf not successful - set as sum. Only last refl should be combined here.
assert list(intensities) == [100.0, 100.0, 200.0, 150.0]
def test_combine_intensities(test_exp_P1):
"""Test the combine intensities function for a single dataset"""
reflections = generate_simple_table()
scaler = Mock()
scaler.reflection_table = reflections
scaler.suitable_refl_for_scaling_sel = flex.bool(reflections.size(), True)
scaler.outliers = flex.bool(reflections.size(), False)
scaler.free_set_selection = flex.bool(reflections.size(), False)
scaler.experiment = test_exp_P1
scaler.params.reflection_selection.combine.Imid = None
combiner = SingleDatasetIntensityCombiner(scaler)
Imid = combiner.max_key
intensity, variance = combiner.calculate_suitable_combined_intensities()
# Imid being 1200.0 should be best for this contrived example
assert Imid == 1200.0
# Due to nature of crossover, just require 2% tolerance for this example
assert list(intensity[0:5]) == pytest.approx(
list(
reflections["intensity.sum.value"][0:5]
* reflections["prescaling_correction"][0:5]
),
rel=2e-2,
)
assert list(intensity[20:25]) == pytest.approx(
list(
reflections["intensity.prf.value"][20:25]
* reflections["prescaling_correction"][20:25]
),
rel=2e-2,
)
assert list(variance[0:5]) == pytest.approx(
list(
reflections["intensity.sum.variance"][0:5]
* flex.pow2(reflections["prescaling_correction"][0:5])
),
rel=2e-2,
)
assert list(variance[20:25]) == pytest.approx(
list(
reflections["intensity.prf.variance"][20:25]
* flex.pow2(reflections["prescaling_correction"][20:25])
),
rel=2e-2,
)
combiner.max_key = 0 # prf
intensity, variance = combiner.calculate_suitable_combined_intensities()
assert list(intensity) == pytest.approx(
list(reflections["intensity.prf.value"] * reflections["prescaling_correction"]),
rel=2e-2,
)
assert list(variance) == pytest.approx(
list(
reflections["intensity.prf.variance"]
* flex.pow2(reflections["prescaling_correction"])
),
rel=2e-2,
)
combiner.max_key = 1 # sum
intensity, variance = combiner.calculate_suitable_combined_intensities()
assert list(intensity) == pytest.approx(
list(reflections["intensity.sum.value"] * reflections["prescaling_correction"]),
rel=2e-2,
)
assert list(variance) == pytest.approx(
list(
reflections["intensity.sum.variance"]
* flex.pow2(reflections["prescaling_correction"])
),
rel=2e-2,
)
def test_combine_intensities_multi_dataset(test_exp_P1):
"""Test the combine intensities function for multiple datasets"""
r1 = generate_simple_table()
r1["partiality"] = flex.double(25, 1.0)
r2 = generate_simple_table(prf=False)
scaler1 = Mock()
scaler1.reflection_table = r1
scaler1.suitable_refl_for_scaling_sel = flex.bool(r1.size(), True)
scaler1.outliers = flex.bool(r1.size(), False)
scaler1.free_set_selection = flex.bool(r1.size(), False)
scaler1.experiment = test_exp_P1
scaler1.params.reflection_selection.combine.Imid = None
scaler2 = Mock()
scaler2.reflection_table = r2
scaler2.suitable_refl_for_scaling_sel = flex.bool(r2.size(), True)
scaler2.outliers = flex.bool(r2.size(), False)
scaler2.free_set_selection = flex.bool(r2.size(), False)
scaler2.experiment = test_exp_P1
scaler2.params.reflection_selection.combine.Imid = None
multiscaler = Mock()
multiscaler.active_scalers = [scaler1, scaler2]
multiscaler.experiment = test_exp_P1
multiscaler.params.reflection_selection.combine.Imid = None
combiner = MultiDatasetIntensityCombiner(multiscaler)
Imid = combiner.max_key
assert pytest.approx(Imid) == 1200.0
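# --- Illustrative sketch (editor's addition, not the DIALS implementation) ---
# A minimal pure-Python rendering of the selection logic exercised by
# test_combine_intensities_prf_sum above: sum-only reflections keep the sum
# intensity, prf-only reflections keep prf, and reflections with both flags are
# blended. The real blend weight is a function of intensity and Imid inside
# dials.algorithms.scaling.combine_intensities; the fixed 50/50 mix below is
# used purely because it reproduces the Isum == Imid case from that test.
def _toy_combine(i_sum, i_prf, has_sum, has_prf):
    combined = []
    for s, p, flag_sum, flag_prf in zip(i_sum, i_prf, has_sum, has_prf):
        if flag_prf and flag_sum:
            combined.append(0.5 * p + 0.5 * s)  # illustrative weight only
        elif flag_prf:
            combined.append(p)
        else:
            combined.append(s)
    return combined

assert _toy_combine(
    [100.0] * 4, [200.0] * 4, [True, False, False, True], [False, False, True, True]
) == [100.0, 100.0, 200.0, 150.0]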
| dials/dials | tests/algorithms/scaling/test_combine_intensities.py | Python | bsd-3-clause | 8,762 | ["CRYSTAL"] | ec4061b4d0299e61d188d08043df49211e8229b557af8ea9f4a07ff4ebed7154 |
"""Unit tests of MQConnectionManager in the DIRAC.Resources.MessageQueue.MConnectionManager
Also, test of internal functions for mq connection storage.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# ignore use of __functions, _functions
# pylint: disable=no-member, protected-access
import unittest
import mock
from DIRAC import S_OK
from DIRAC.Resources.MessageQueue.MQConnectionManager import MQConnectionManager
class TestMQConnectionManager(unittest.TestCase):
def setUp(self):
self.maxDiff = None # To show full difference between structures in case of error
dest = {}
dest.update({"/queue/test1": ["producer4", "consumer1", "consumer2", "consumer4"]})
dest.update({"/queue/test2": ["producer2", "consumer1", "consumer2"]})
dest.update({"/topic/test1": ["producer1"]})
dest4 = {"/queue/test3": ["producer1", "consumer2", "consumer3", "consumer4"]}
conn1 = {"MQConnector": "TestConnector1", "destinations": dest}
conn2 = {"MQConnector": "TestConnector2", "destinations": dest4}
storage = {"mardirac3.in2p3.fr": conn1, "testdir.blabla.ch": conn2}
self.mgr = MQConnectionManager(connectionStorage=storage)
def tearDown(self):
pass
class TestMQConnectionStorageFunctions_connectionExists(TestMQConnectionManager):
def test_success(self):
self.assertTrue(self.mgr._MQConnectionManager__connectionExists("mardirac3.in2p3.fr"))
def test_failure(self):
self.assertFalse(self.mgr._MQConnectionManager__connectionExists("nonexisting"))
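# Editor's note (illustration, not part of the original test module): the calls
# above reach private helpers through Python name mangling -- an attribute spelled
# __connectionExists inside MQConnectionManager is stored as
# _MQConnectionManager__connectionExists, which is why the tests write it out in
# full. A minimal standalone demonstration of the same mechanism:
#
#   class Demo(object):
#       def __hidden(self):
#           return 42
#
#   assert Demo()._Demo__hidden() == 42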
class TestMQConnectionStorageFunctions_destinationExists(TestMQConnectionManager):
def test_success(self):
self.assertTrue(self.mgr._MQConnectionManager__destinationExists("mardirac3.in2p3.fr", "/queue/test1"))
def test_failure(self):
self.assertFalse(self.mgr._MQConnectionManager__destinationExists("nonexisting", "/queue/test1"))
def test_failure2(self):
self.assertFalse(self.mgr._MQConnectionManager__destinationExists("mardirac3.in2p3.fr", "/queue/nonexisting"))
class TestMQConnectionStorageFunctions_messengerExists(TestMQConnectionManager):
def test_success(self):
self.assertTrue(
self.mgr._MQConnectionManager__messengerExists("mardirac3.in2p3.fr", "/queue/test1", "consumer2")
)
self.assertTrue(
self.mgr._MQConnectionManager__messengerExists("mardirac3.in2p3.fr", "/queue/test1", "producer4")
)
def test_failure(self):
self.assertFalse(self.mgr._MQConnectionManager__messengerExists("noexisting", "/queue/test1", "producer4"))
def test_failure2(self):
self.assertFalse(
self.mgr._MQConnectionManager__messengerExists("mardirac3.in2p3.fr", "/queue/nonexisting", "producer4")
)
def test_failure3(self):
self.assertFalse(
self.mgr._MQConnectionManager__messengerExists("mardirac3.in2p3.fr", "/queue/test1", "producer10")
)
class TestMQConnectionStorageFunctions_getConnection(TestMQConnectionManager):
def test_success(self):
expectedConn = {
"MQConnector": "TestConnector2",
"destinations": {"/queue/test3": ["producer1", "consumer2", "consumer3", "consumer4"]},
}
self.assertEqual(self.mgr._MQConnectionManager__getConnection("testdir.blabla.ch"), expectedConn)
def test_failure(self):
self.assertEqual(self.mgr._MQConnectionManager__getConnection("nonexisiting"), {})
class TestMQConnectionStorageFunctions_getAllConnections(TestMQConnectionManager):
def test_success(self):
expectedOutput = ["testdir.blabla.ch", "mardirac3.in2p3.fr"]
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllConnections()), sorted(expectedOutput))
class TestMQConnectionStorageFunctions_getConnector(TestMQConnectionManager):
def test_success(self):
self.assertEqual(self.mgr._MQConnectionManager__getConnector("testdir.blabla.ch"), "TestConnector2")
def test_failure(self):
self.assertIsNone(self.mgr._MQConnectionManager__getConnector("nonexisiting"))
class TestMQConnectionStorageFunctions_setConnector(TestMQConnectionManager):
def test_success(self):
self.assertTrue(self.mgr._MQConnectionManager__setConnector("testdir.blabla.ch", "TestConnector5"))
self.assertEqual(self.mgr._MQConnectionManager__getConnector("testdir.blabla.ch"), "TestConnector5")
def test_failure(self):
self.assertFalse(self.mgr._MQConnectionManager__setConnector("nonexisiting", "TestConnector3"))
class TestMQConnectionStorageFunctions_getDestinations(TestMQConnectionManager):
def test_success(self):
expectedDests = {
"/queue/test1": ["producer4", "consumer1", "consumer2", "consumer4"],
"/queue/test2": ["producer2", "consumer1", "consumer2"],
"/topic/test1": ["producer1"],
}
self.assertEqual(self.mgr._MQConnectionManager__getDestinations("mardirac3.in2p3.fr"), expectedDests)
def test_failure(self):
self.assertEqual(self.mgr._MQConnectionManager__getDestinations("nonexisiting"), {})
class TestMQConnectionStorageFunctions_getMessengersId(TestMQConnectionManager):
def test_success(self):
expectedMess = ["producer4", "consumer1", "consumer2", "consumer4"]
self.assertEqual(
self.mgr._MQConnectionManager__getMessengersId("mardirac3.in2p3.fr", "/queue/test1"), expectedMess
)
def test_success2(self):
expectedMess2 = ["producer2", "consumer1", "consumer2"]
self.assertEqual(
self.mgr._MQConnectionManager__getMessengersId("mardirac3.in2p3.fr", "/queue/test2"), expectedMess2
)
def test_failure(self):
self.assertEqual(self.mgr._MQConnectionManager__getMessengersId("nonexisiting", "/queue/test2"), [])
def test_failure2(self):
self.assertEqual(self.mgr._MQConnectionManager__getMessengersId("mardirac3.in2p3.fr", "nonexisiting"), [])
class TestMQConnectionStorageFunctions_getMessengersIdWithType(TestMQConnectionManager):
def test_success(self):
expectedMess = ["producer4"]
self.assertEqual(
self.mgr._MQConnectionManager__getMessengersIdWithType("mardirac3.in2p3.fr", "/queue/test1", "producer"),
expectedMess,
)
def test_success2(self):
expectedMess2 = ["producer2"]
self.assertEqual(
self.mgr._MQConnectionManager__getMessengersIdWithType("mardirac3.in2p3.fr", "/queue/test2", "producer"),
expectedMess2,
)
def test_success3(self):
expectedMess = ["consumer1", "consumer2", "consumer4"]
self.assertEqual(
self.mgr._MQConnectionManager__getMessengersIdWithType("mardirac3.in2p3.fr", "/queue/test1", "consumer"),
expectedMess,
)
def test_success4(self):
expectedMess2 = ["consumer1", "consumer2"]
self.assertEqual(
self.mgr._MQConnectionManager__getMessengersIdWithType("mardirac3.in2p3.fr", "/queue/test2", "consumer"),
expectedMess2,
)
def test_failure(self):
self.assertEqual(
self.mgr._MQConnectionManager__getMessengersIdWithType("nonexisiting", "/queue/test2", "producer"), []
)
def test_failure2(self):
self.assertEqual(
self.mgr._MQConnectionManager__getMessengersIdWithType("mardirac3.in2p3.fr", "nonexisiting", "producer"), []
)
class TestMQConnectionStorageFunctions_getAllMessengersInfo(TestMQConnectionManager):
def test_success(self):
expectedOutput = [
"mardirac3.in2p3.fr/queue/test1/producer4",
"mardirac3.in2p3.fr/queue/test1/consumer1",
"mardirac3.in2p3.fr/queue/test1/consumer2",
"mardirac3.in2p3.fr/queue/test1/consumer4",
"mardirac3.in2p3.fr/queue/test2/producer2",
"mardirac3.in2p3.fr/queue/test2/consumer1",
"mardirac3.in2p3.fr/queue/test2/consumer2",
"mardirac3.in2p3.fr/topic/test1/producer1",
"testdir.blabla.ch/queue/test3/producer1",
"testdir.blabla.ch/queue/test3/consumer2",
"testdir.blabla.ch/queue/test3/consumer3",
"testdir.blabla.ch/queue/test3/consumer4",
]
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()), sorted(expectedOutput))
class TestMQConnectionStorageFunctions_getAllMessengersId(TestMQConnectionManager):
def test_success(self):
expectedOutput = [
"producer4",
"consumer1",
"consumer2",
"consumer4",
"producer2",
"consumer1",
"consumer2",
"producer1",
"producer1",
"consumer2",
"consumer3",
"consumer4",
]
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersId()), sorted(expectedOutput))
class TestMQConnectionStorageFunctions_getAllMessengersIdWithType(TestMQConnectionManager):
def test_success(self):
expectedOutput = [
"consumer1",
"consumer2",
"consumer4",
"consumer1",
"consumer2",
"consumer2",
"consumer3",
"consumer4",
]
self.assertEqual(
sorted(self.mgr._MQConnectionManager__getAllMessengersIdWithType("consumer")), sorted(expectedOutput)
)
expectedOutput = ["producer4", "producer2", "producer1", "producer1"]
self.assertEqual(
sorted(self.mgr._MQConnectionManager__getAllMessengersIdWithType("producer")), sorted(expectedOutput)
)
class TestMQConnectionStorageFunctions_addMessenger(TestMQConnectionManager):
def test_success(self):
expectedOutput = [
"mardirac3.in2p3.fr/queue/test1/producer1",
"mardirac3.in2p3.fr/queue/test1/producer4",
"mardirac3.in2p3.fr/queue/test1/consumer1",
"mardirac3.in2p3.fr/queue/test1/consumer2",
"mardirac3.in2p3.fr/queue/test1/consumer4",
"mardirac3.in2p3.fr/queue/test2/producer2",
"mardirac3.in2p3.fr/queue/test2/consumer1",
"mardirac3.in2p3.fr/queue/test2/consumer2",
"mardirac3.in2p3.fr/topic/test1/producer1",
"testdir.blabla.ch/queue/test3/producer1",
"testdir.blabla.ch/queue/test3/consumer2",
"testdir.blabla.ch/queue/test3/consumer3",
"testdir.blabla.ch/queue/test3/consumer4",
]
self.assertTrue(self.mgr._MQConnectionManager__addMessenger("mardirac3.in2p3.fr", "/queue/test1", "producer1"))
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()), sorted(expectedOutput))
def test_success2(self):
# new queue
expectedOutput = [
"mardirac3.in2p3.fr/queue/test5/producer8",
"mardirac3.in2p3.fr/queue/test1/producer4",
"mardirac3.in2p3.fr/queue/test1/consumer1",
"mardirac3.in2p3.fr/queue/test1/consumer2",
"mardirac3.in2p3.fr/queue/test1/consumer4",
"mardirac3.in2p3.fr/queue/test2/producer2",
"mardirac3.in2p3.fr/queue/test2/consumer1",
"mardirac3.in2p3.fr/queue/test2/consumer2",
"mardirac3.in2p3.fr/topic/test1/producer1",
"testdir.blabla.ch/queue/test3/producer1",
"testdir.blabla.ch/queue/test3/consumer2",
"testdir.blabla.ch/queue/test3/consumer3",
"testdir.blabla.ch/queue/test3/consumer4",
]
self.assertTrue(self.mgr._MQConnectionManager__addMessenger("mardirac3.in2p3.fr", "/queue/test5", "producer8"))
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()), sorted(expectedOutput))
def test_success3(self):
# new connection
expectedOutput = [
"mytest.is.the.best/queue/test10/producer24",
"mardirac3.in2p3.fr/queue/test1/producer4",
"mardirac3.in2p3.fr/queue/test1/consumer1",
"mardirac3.in2p3.fr/queue/test1/consumer2",
"mardirac3.in2p3.fr/queue/test1/consumer4",
"mardirac3.in2p3.fr/queue/test2/producer2",
"mardirac3.in2p3.fr/queue/test2/consumer1",
"mardirac3.in2p3.fr/queue/test2/consumer2",
"mardirac3.in2p3.fr/topic/test1/producer1",
"testdir.blabla.ch/queue/test3/producer1",
"testdir.blabla.ch/queue/test3/consumer2",
"testdir.blabla.ch/queue/test3/consumer3",
"testdir.blabla.ch/queue/test3/consumer4",
]
self.assertTrue(
self.mgr._MQConnectionManager__addMessenger("mytest.is.the.best", "/queue/test10", "producer24")
)
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()), sorted(expectedOutput))
def test_success4(self):
# two times
expectedOutput = [
"mytest.is.the.best/queue/test10/producer2",
"mytest.is.the.best/queue/test10/producer24",
"mardirac3.in2p3.fr/queue/test1/producer4",
"mardirac3.in2p3.fr/queue/test1/consumer1",
"mardirac3.in2p3.fr/queue/test1/consumer2",
"mardirac3.in2p3.fr/queue/test1/consumer4",
"mardirac3.in2p3.fr/queue/test2/producer2",
"mardirac3.in2p3.fr/queue/test2/consumer1",
"mardirac3.in2p3.fr/queue/test2/consumer2",
"mardirac3.in2p3.fr/topic/test1/producer1",
"testdir.blabla.ch/queue/test3/producer1",
"testdir.blabla.ch/queue/test3/consumer2",
"testdir.blabla.ch/queue/test3/consumer3",
"testdir.blabla.ch/queue/test3/consumer4",
]
self.assertTrue(
self.mgr._MQConnectionManager__addMessenger("mytest.is.the.best", "/queue/test10", "producer24")
)
self.assertTrue(self.mgr._MQConnectionManager__addMessenger("mytest.is.the.best", "/queue/test10", "producer2"))
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()), sorted(expectedOutput))
def test_failure(self):
# messenger already exists
expectedOutput = [
"mardirac3.in2p3.fr/queue/test1/producer4",
"mardirac3.in2p3.fr/queue/test1/consumer1",
"mardirac3.in2p3.fr/queue/test1/consumer2",
"mardirac3.in2p3.fr/queue/test1/consumer4",
"mardirac3.in2p3.fr/queue/test2/producer2",
"mardirac3.in2p3.fr/queue/test2/consumer1",
"mardirac3.in2p3.fr/queue/test2/consumer2",
"mardirac3.in2p3.fr/topic/test1/producer1",
"testdir.blabla.ch/queue/test3/producer1",
"testdir.blabla.ch/queue/test3/consumer2",
"testdir.blabla.ch/queue/test3/consumer3",
"testdir.blabla.ch/queue/test3/consumer4",
]
self.assertFalse(self.mgr._MQConnectionManager__addMessenger("mardirac3.in2p3.fr", "/queue/test1", "producer4"))
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()), sorted(expectedOutput))
class TestMQConnectionStorageFunctions_removeMessenger(TestMQConnectionManager):
def test_success(self):
expectedOutput = [
"mardirac3.in2p3.fr/queue/test1/consumer1",
"mardirac3.in2p3.fr/queue/test1/consumer2",
"mardirac3.in2p3.fr/queue/test1/consumer4",
"mardirac3.in2p3.fr/queue/test2/producer2",
"mardirac3.in2p3.fr/queue/test2/consumer1",
"mardirac3.in2p3.fr/queue/test2/consumer2",
"mardirac3.in2p3.fr/topic/test1/producer1",
"testdir.blabla.ch/queue/test3/producer1",
"testdir.blabla.ch/queue/test3/consumer2",
"testdir.blabla.ch/queue/test3/consumer3",
"testdir.blabla.ch/queue/test3/consumer4",
]
self.assertTrue(
self.mgr._MQConnectionManager__removeMessenger("mardirac3.in2p3.fr", "/queue/test1", "producer4")
)
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()), sorted(expectedOutput))
def test_success2(self):
# remove whole destination /topic/test1 cause only one element
expectedOutput = [
"mardirac3.in2p3.fr/queue/test1/producer4",
"mardirac3.in2p3.fr/queue/test1/consumer1",
"mardirac3.in2p3.fr/queue/test1/consumer2",
"mardirac3.in2p3.fr/queue/test1/consumer4",
"mardirac3.in2p3.fr/queue/test2/producer2",
"mardirac3.in2p3.fr/queue/test2/consumer1",
"mardirac3.in2p3.fr/queue/test2/consumer2",
"testdir.blabla.ch/queue/test3/producer1",
"testdir.blabla.ch/queue/test3/consumer2",
"testdir.blabla.ch/queue/test3/consumer3",
"testdir.blabla.ch/queue/test3/consumer4",
]
self.assertTrue(
self.mgr._MQConnectionManager__removeMessenger("mardirac3.in2p3.fr", "/topic/test1", "producer1")
)
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()), sorted(expectedOutput))
def test_success3(self):
expectedOutput = [
"mardirac3.in2p3.fr/queue/test1/producer4",
"mardirac3.in2p3.fr/queue/test1/consumer1",
"mardirac3.in2p3.fr/queue/test1/consumer2",
"mardirac3.in2p3.fr/queue/test1/consumer4",
"mardirac3.in2p3.fr/queue/test2/producer2",
"mardirac3.in2p3.fr/queue/test2/consumer1",
"mardirac3.in2p3.fr/queue/test2/consumer2",
"mardirac3.in2p3.fr/topic/test1/producer1",
]
# remove whole connection
self.assertTrue(
self.mgr._MQConnectionManager__removeMessenger("testdir.blabla.ch", "/queue/test3", "producer1")
)
self.assertTrue(
self.mgr._MQConnectionManager__removeMessenger("testdir.blabla.ch", "/queue/test3", "consumer2")
)
self.assertTrue(
self.mgr._MQConnectionManager__removeMessenger("testdir.blabla.ch", "/queue/test3", "consumer3")
)
self.assertTrue(
self.mgr._MQConnectionManager__removeMessenger("testdir.blabla.ch", "/queue/test3", "consumer4")
)
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()), sorted(expectedOutput))
def test_failure(self):
# remove nonexisting messenger
self.assertFalse(
self.mgr._MQConnectionManager__removeMessenger("testdir.blabla.ch", "/queue/test3", "producer10")
)
def test_failure2(self):
# remove nonexisting destination
self.assertFalse(
self.mgr._MQConnectionManager__removeMessenger("testdir.blabla.ch", "/queue/nonexisting", "producer1")
)
def test_failure3(self):
# remove nonexisting connection
self.assertFalse(self.mgr._MQConnectionManager__removeMessenger("nonexisting", "/queue/test103", "producer1"))
class TestMQConnectionManager_addNewmessenger(TestMQConnectionManager):
def test_success(self):
result = self.mgr.addNewMessenger(mqURI="mardirac3.in2p3.fr::Queues::test1", messengerType="producer")
self.assertTrue(result["OK"])
self.assertEqual(result["Value"], "producer5")
expectedOutput = [
"mardirac3.in2p3.fr/queue/test1/producer5",
"mardirac3.in2p3.fr/queue/test1/producer4",
"mardirac3.in2p3.fr/queue/test1/consumer1",
"mardirac3.in2p3.fr/queue/test1/consumer2",
"mardirac3.in2p3.fr/queue/test1/consumer4",
"mardirac3.in2p3.fr/queue/test2/producer2",
"mardirac3.in2p3.fr/queue/test2/consumer1",
"mardirac3.in2p3.fr/queue/test2/consumer2",
"mardirac3.in2p3.fr/topic/test1/producer1",
"testdir.blabla.ch/queue/test3/producer1",
"testdir.blabla.ch/queue/test3/consumer2",
"testdir.blabla.ch/queue/test3/consumer3",
"testdir.blabla.ch/queue/test3/consumer4",
]
result = self.mgr.getAllMessengers()
self.assertEqual(sorted(result["Value"]), sorted(expectedOutput))
def test_success2(self):
result = self.mgr.addNewMessenger(mqURI="mardirac3.in2p3.fr::Topics::test1", messengerType="consumer")
self.assertTrue(result["OK"])
self.assertEqual(result["Value"], "consumer5")
def test_success3(self):
result = self.mgr.addNewMessenger(mqURI="testdir.blabla.ch::Queues::test3", messengerType="consumer")
self.assertTrue(result["OK"])
self.assertEqual(result["Value"], "consumer5")
def test_success4(self):
# connection does not exist
result = self.mgr.addNewMessenger(mqURI="noexisting.blabla.ch::Queues::test3", messengerType="consumer")
self.assertTrue(result["OK"])
self.assertEqual(result["Value"], "consumer5")
expectedOutput = [
"noexisting.blabla.ch/queue/test3/consumer5",
"mardirac3.in2p3.fr/queue/test1/producer4",
"mardirac3.in2p3.fr/queue/test1/consumer1",
"mardirac3.in2p3.fr/queue/test1/consumer2",
"mardirac3.in2p3.fr/queue/test1/consumer4",
"mardirac3.in2p3.fr/queue/test2/producer2",
"mardirac3.in2p3.fr/queue/test2/consumer1",
"mardirac3.in2p3.fr/queue/test2/consumer2",
"mardirac3.in2p3.fr/topic/test1/producer1",
"testdir.blabla.ch/queue/test3/producer1",
"testdir.blabla.ch/queue/test3/consumer2",
"testdir.blabla.ch/queue/test3/consumer3",
"testdir.blabla.ch/queue/test3/consumer4",
]
result = self.mgr.getAllMessengers()
self.assertEqual(sorted(result["Value"]), sorted(expectedOutput))
class TestMQConnectionManager_startConnection(TestMQConnectionManager):
def test_success(self):
# existing connection
result = self.mgr.startConnection(
mqURI="mardirac3.in2p3.fr::Queues::test1", params={}, messengerType="producer"
)
self.assertTrue(result["OK"])
self.assertEqual(result["Value"], "producer5")
expectedOutput = [
"mardirac3.in2p3.fr/queue/test1/producer5",
"mardirac3.in2p3.fr/queue/test1/producer4",
"mardirac3.in2p3.fr/queue/test1/consumer1",
"mardirac3.in2p3.fr/queue/test1/consumer2",
"mardirac3.in2p3.fr/queue/test1/consumer4",
"mardirac3.in2p3.fr/queue/test2/producer2",
"mardirac3.in2p3.fr/queue/test2/consumer1",
"mardirac3.in2p3.fr/queue/test2/consumer2",
"mardirac3.in2p3.fr/topic/test1/producer1",
"testdir.blabla.ch/queue/test3/producer1",
"testdir.blabla.ch/queue/test3/consumer2",
"testdir.blabla.ch/queue/test3/consumer3",
"testdir.blabla.ch/queue/test3/consumer4",
]
result = self.mgr.getAllMessengers()
self.assertEqual(sorted(result["Value"]), sorted(expectedOutput))
@mock.patch("DIRAC.Resources.MessageQueue.MQConnectionManager.MQConnectionManager.createConnectorAndConnect")
def test_success2(self, mock_createConnectorAndConnect):
# connection does not exist
mock_createConnectorAndConnect.return_value = S_OK("MyConnector")
result = self.mgr.startConnection(
mqURI="noexisting.blabla.ch::Queues::test3", params={}, messengerType="consumer"
)
self.assertTrue(result["OK"])
self.assertEqual(result["Value"], "consumer5")
expectedOutput = [
"noexisting.blabla.ch/queue/test3/consumer5",
"mardirac3.in2p3.fr/queue/test1/producer4",
"mardirac3.in2p3.fr/queue/test1/consumer1",
"mardirac3.in2p3.fr/queue/test1/consumer2",
"mardirac3.in2p3.fr/queue/test1/consumer4",
"mardirac3.in2p3.fr/queue/test2/producer2",
"mardirac3.in2p3.fr/queue/test2/consumer1",
"mardirac3.in2p3.fr/queue/test2/consumer2",
"mardirac3.in2p3.fr/topic/test1/producer1",
"testdir.blabla.ch/queue/test3/producer1",
"testdir.blabla.ch/queue/test3/consumer2",
"testdir.blabla.ch/queue/test3/consumer3",
"testdir.blabla.ch/queue/test3/consumer4",
]
result = self.mgr.getAllMessengers()
self.assertEqual(sorted(result["Value"]), sorted(expectedOutput))
result = self.mgr.getConnector("noexisting.blabla.ch")
self.assertEqual(result["Value"], "MyConnector")
class TestMQConnectionManager_stopConnection(TestMQConnectionManager):
def test_success(self):
result = self.mgr.stopConnection(mqURI="mardirac3.in2p3.fr::Queues::test1", messengerId="producer4")
self.assertTrue(result["OK"])
expectedOutput = [
"mardirac3.in2p3.fr/queue/test1/consumer1",
"mardirac3.in2p3.fr/queue/test1/consumer2",
"mardirac3.in2p3.fr/queue/test1/consumer4",
"mardirac3.in2p3.fr/queue/test2/producer2",
"mardirac3.in2p3.fr/queue/test2/consumer1",
"mardirac3.in2p3.fr/queue/test2/consumer2",
"mardirac3.in2p3.fr/topic/test1/producer1",
"testdir.blabla.ch/queue/test3/producer1",
"testdir.blabla.ch/queue/test3/consumer2",
"testdir.blabla.ch/queue/test3/consumer3",
"testdir.blabla.ch/queue/test3/consumer4",
]
result = self.mgr.getAllMessengers()
self.assertEqual(sorted(result["Value"]), sorted(expectedOutput))
def test_success2(self):
result = self.mgr.stopConnection(mqURI="mardirac3.in2p3.fr::Topics::test1", messengerId="producer1")
self.assertTrue(result["OK"])
expectedOutput = [
"mardirac3.in2p3.fr/queue/test1/producer4",
"mardirac3.in2p3.fr/queue/test1/consumer1",
"mardirac3.in2p3.fr/queue/test1/consumer2",
"mardirac3.in2p3.fr/queue/test1/consumer4",
"mardirac3.in2p3.fr/queue/test2/producer2",
"mardirac3.in2p3.fr/queue/test2/consumer1",
"mardirac3.in2p3.fr/queue/test2/consumer2",
"testdir.blabla.ch/queue/test3/producer1",
"testdir.blabla.ch/queue/test3/consumer2",
"testdir.blabla.ch/queue/test3/consumer3",
"testdir.blabla.ch/queue/test3/consumer4",
]
result = self.mgr.getAllMessengers()
self.assertEqual(sorted(result["Value"]), sorted(expectedOutput))
@mock.patch("DIRAC.Resources.MessageQueue.MQConnectionManager.MQConnectionManager.unsubscribe")
@mock.patch("DIRAC.Resources.MessageQueue.MQConnectionManager.MQConnectionManager.disconnect")
def test_success3(self, mock_disconnect, mock_unsubscribe):
mock_disconnect.return_value = S_OK()
mock_unsubscribe.return_value = S_OK()
result = self.mgr.stopConnection(mqURI="testdir.blabla.ch::Queues::test3", messengerId="consumer3")
self.assertTrue(result["OK"])
result = self.mgr.stopConnection(mqURI="testdir.blabla.ch::Queues::test3", messengerId="producer1")
self.assertTrue(result["OK"])
result = self.mgr.stopConnection(mqURI="testdir.blabla.ch::Queues::test3", messengerId="consumer2")
self.assertTrue(result["OK"])
result = self.mgr.stopConnection(mqURI="testdir.blabla.ch::Queues::test3", messengerId="consumer4")
self.assertTrue(result["OK"])
expectedOutput = [
"mardirac3.in2p3.fr/queue/test1/producer4",
"mardirac3.in2p3.fr/queue/test1/consumer1",
"mardirac3.in2p3.fr/queue/test1/consumer2",
"mardirac3.in2p3.fr/queue/test1/consumer4",
"mardirac3.in2p3.fr/queue/test2/producer2",
"mardirac3.in2p3.fr/queue/test2/consumer1",
"mardirac3.in2p3.fr/queue/test2/consumer2",
"mardirac3.in2p3.fr/topic/test1/producer1",
]
result = self.mgr.getAllMessengers()
self.assertEqual(sorted(result["Value"]), sorted(expectedOutput))
class TestMQConnectionManager_removeAllConnections(TestMQConnectionManager):
@mock.patch("DIRAC.Resources.MessageQueue.MQConnectionManager.MQConnectionManager.disconnect")
def test_success(self, mock_disconnect):
mock_disconnect.return_value = S_OK()
result = self.mgr.removeAllConnections()
self.assertTrue(result["OK"])
expectedOutput = []
result = self.mgr.getAllMessengers()
self.assertEqual(sorted(result["Value"]), sorted(expectedOutput))
class TestMQConnectionManager_getAllMessengers(TestMQConnectionManager):
def test_success(self):
result = self.mgr.getAllMessengers()
self.assertTrue(result["OK"])
expectedOutput = [
"mardirac3.in2p3.fr/queue/test1/producer4",
"mardirac3.in2p3.fr/queue/test1/consumer1",
"mardirac3.in2p3.fr/queue/test1/consumer2",
"mardirac3.in2p3.fr/queue/test1/consumer4",
"mardirac3.in2p3.fr/queue/test2/producer2",
"mardirac3.in2p3.fr/queue/test2/consumer1",
"mardirac3.in2p3.fr/queue/test2/consumer2",
"mardirac3.in2p3.fr/topic/test1/producer1",
"testdir.blabla.ch/queue/test3/producer1",
"testdir.blabla.ch/queue/test3/consumer2",
"testdir.blabla.ch/queue/test3/consumer3",
"testdir.blabla.ch/queue/test3/consumer4",
]
result = self.mgr.getAllMessengers()
self.assertEqual(sorted(result["Value"]), sorted(expectedOutput))
class TestMQConnectionManager_getConnector(TestMQConnectionManager):
def test_success(self):
result = self.mgr.getConnector("mardirac3.in2p3.fr")
self.assertTrue(result["OK"])
def test_failure(self):
result = self.mgr.getConnector("nonexistent.in2p3.fr")
self.assertEqual(result["Message"], "Failed to get the MQConnector!")
if __name__ == "__main__":
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionManager)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionManager_addNewmessenger))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionManager_startConnection))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionManager_stopConnection))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionManager_removeAllConnections))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionManager_getAllMessengers))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionManager_getConnector))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionStorageFunctions_connectionExists))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionStorageFunctions_destinationExists))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionStorageFunctions_messengerExists))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionStorageFunctions_getConnection))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionStorageFunctions_getAllConnections))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionStorageFunctions_getConnector))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionStorageFunctions_setConnector))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionStorageFunctions_getDestinations))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionStorageFunctions_getMessengersId))
suite.addTest(
unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionStorageFunctions_getMessengersIdWithType)
)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionStorageFunctions_addMessenger))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionStorageFunctions_removeMessenger))
suite.addTest(
unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionStorageFunctions_getAllMessengersInfo)
)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionStorageFunctions_getAllMessengersId))
suite.addTest(
unittest.defaultTestLoader.loadTestsFromTestCase(TestMQConnectionStorageFunctions_getAllMessengersIdWithType)
)
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
| ic-hep/DIRAC | src/DIRAC/Resources/MessageQueue/test/Test_MQConnectionManager.py | Python | gpl-3.0 | 32,786 | ["DIRAC"] | 7dbe7fc971cc8f301ce2dd29f35fa63b29ab727ff8fa800e02a63b1ba9ee75b9 |
""" Library of splinter browser steps.
Adapted for pytest-bdd and pytest-splinter from behaving_ 1.1.
.. _behaving: https://pypi.python.org/pypi/behaving
"""
from pytest_bdd.parsers import parse
from pytest_bdd import (
given,
then,
when,
)
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
####################
# basic
@when(parse('I wait for {timeout:d} seconds'))
def wait_for_timeout(browser, timeout):
import time
time.sleep(timeout)
@when(parse('I show the element with id "{id}"'))
def show_element_by_id(browser, id):
assert browser.find_by_id(id)
browser.execute_script('document.getElementById("%s").style.display="inline";' % id)
@when(parse('I hide the element with id "{id}"'))
def hide_element_by_id(browser, id):
assert browser.find_by_id(id)
browser.execute_script('document.getElementById("%s").style.display="none";' % id)
@then(parse('I should see "{text}"'))
def should_see(browser, text):
assert browser.is_text_present(text), u'Text not found'
@then(parse('I should not see "{text}"'))
def should_not_see(browser, text):
assert browser.is_text_not_present(text), u'Text was found'
@then(parse('I should see "{text}" within {timeout:d} seconds'))
def should_see_within_timeout(browser, text, timeout):
assert browser.is_text_present(text, wait_time=timeout), u'Text not found'
@then(parse('I should not see "{text}" within {timeout:d} seconds'))
def should_not_see_within_timeout(browser, text, timeout):
assert browser.is_text_not_present(text, wait_time=timeout), u'Text was found'
@then(parse('I should see an element with id "{id}"'))
def should_see_element_with_id(browser, id):
assert browser.is_element_present_by_id(id), u'Element not found'
@then(parse('I should not see an element with id "{id}"'))
def should_not_see_element_with_id(browser, id):
assert browser.is_element_not_present_by_id(id), u'Element was found'
@then(parse('I should see an element with id "{id}" within {timeout:d} seconds'))
def should_see_element_with_id_within_timeout(browser, id, timeout):
assert browser.is_element_present_by_id(id, wait_time=timeout), u'Element not found'
@then(parse('I should not see an element with id "{id}" within {timeout:d} seconds'))
def should_not_see_element_with_id_within_timeout(browser, id, timeout):
assert browser.is_element_not_present_by_id(id, wait_time=timeout), u'Element was found'
@then(parse('I should see an element with the css selector "{css}"'))
def should_see_element_with_css(browser, css):
assert browser.is_element_present_by_css(css), u'Element not found'
@then(parse('I should not see an element with the css selector "{css}"'))
def should_not_see_element_with_css(browser, css):
assert browser.is_element_not_present_by_css(css), u'Element was found'
@then(parse('I should see an element with the css selector "{css}" within {timeout:d} seconds'))
def should_see_element_with_css_within_timeout(browser, css, timeout):
assert browser.is_element_present_by_css(css, wait_time=timeout), u'Element not found'
@then(parse('I should not see an element with the css selector "{css}" within {timeout:d} seconds'))
def should_not_see_element_with_css_within_timeout(browser, css, timeout):
assert browser.is_element_not_present_by_css(css, wait_time=timeout), u'Element was found'
@then(parse('I should see {n:d} elements with the css selector "{css}"'))
def should_see_n_elements_with_css(browser, n, css):
element_list = browser.find_by_css(css)
list_length = len(element_list)
assert list_length == n, u'Found {list_length} elements, expected {n}'.format(**locals())
####################
# browser
@when('I reload')
def reload(browser):
browser.reload()
@when('I go back')
def go_back(browser):
browser.back()
@when('I go forward')
def go_forward(browser):
browser.forward()
@when(parse('I set the cookie "{key}" to "{value}"'))
def set_cookie(browser, key, value):
browser.cookies.add({key: value})
@when(parse('I delete the cookie "{key}"'))
def delete_cookie(browser, key):
browser.cookies.delete(key)
@when(parse('I delete all cookies'))
def delete_all_cookies(browser):
browser.cookies.delete()
@when(parse('I resize the browser to {width}x{height}'))
def resize_browser(browser, width, height):
browser.driver.set_window_size(int(width), int(height))
@when(parse('I resize the viewport to {width}x{height}'))
def resize_viewport(browser, width, height):
width = int(width)
height = int(height)
b_size = browser.driver.get_window_size()
b_width = b_size['width']
b_height = b_size['height']
v_width = browser.evaluate_script("document.documentElement.clientWidth")
v_height = browser.evaluate_script("document.documentElement.clientHeight")
browser.driver.set_window_size(
b_width + width - v_width,
b_height + height - v_height)
@when("I maximize the browser's window")
def maximize_window(browser):
browser.driver.maximize_window()
####################
# forms
@when(parse('I fill in "{name}" with "{value}"'))
def i_fill_in_field(browser, name, value):
browser.fill(name, value)
@when(parse('I clear field "{name}"'))
def i_clear_field(browser, name):
el = browser.find_element_by_name(name)
assert el, 'Element not found'
el.clear()
@when(parse('I type "{value}" to "{name}"'))
def i_type_to(browser, name, value):
for key in browser.type(name, value, slowly=True):
assert key
@when(parse('I choose "{value}" from "{name}"'))
def i_choose_in_radio(browser, name, value):
browser.choose(name, value)
@when(parse('I check "{name}"'))
def i_check(browser, name):
browser.check(name)
@when(parse('I uncheck "{name}"'))
def i_uncheck(browser, name):
browser.uncheck(name)
@when(parse('I toggle "{name}"'))
def i_toggle(browser, name):
el = browser.find_by_name(name)
assert el, u'Element not found'
el = el.first
if el.checked:
el.uncheck()
else:
el.check()
@when(parse('I select "{value}" from "{name}"'))
def i_select(browser, value, name):
from splinter.exceptions import ElementDoesNotExist
try:
browser.select(name, value)
except ElementDoesNotExist:
inp = browser.find_by_xpath("//input[@name='%s'][@value='%s']" % (name, value))
assert inp, u'Element not found'
inp.first.check()
@when(parse('I select "{value}" from dropdown "{name}"'))
def i_select_from_dropdown(browser, value, name):
from splinter.exceptions import ElementDoesNotExist
try:
element = browser.find_by_xpath("//select[@class='%s']//option[@value='%s']" % (name, value)).first
element.click()
except ElementDoesNotExist:
inp = browser.find_by_xpath("//select[@class='%s']//option[@value='%s']" % (name, value)).first
assert inp, u'Element not found'
inp.first.check()
@when(parse('I press "{name}"'))
def i_press(browser, name):
element = browser.find_by_xpath(
("//*[@id='%(name)s']|"
"//*[@name='%(name)s']|"
"//button[contains(text(), '%(name)s')]|"
"//a[contains(text(), '%(name)s')]") % {'name': name})
assert element, u'Element not found'
element.first.click()
@when(parse('I press the element with xpath "{xpath}"'))
def i_press_xpath(browser, xpath):
button = browser.find_by_xpath(xpath)
assert button, u'Element not found'
button.first.click()
@when(parse('I attach the file "{path}" to "{name}"'))
def i_attach(browser, name, path):
import os
if not os.path.exists(path):
path = os.path.join(browser.attachment_dir, path)
if not os.path.exists(path):
assert False, u'File not found: %s' % path
browser.attach_file(name, path)
@when(parse('I set the inner HTML of the element with id "{id}" to "{contents}"'))
def set_html_content_to_element_with_id(browser, id, contents):
assert browser.evaluate_script("document.getElementById('%s').innerHTML = '%s'" % (id, contents)), \
u'Element not found or could not set HTML content'
@when(parse('I set the inner HTML of the element with class "{klass}" to "{contents}"'))
def set_html_content_to_element_with_class(browser, klass, contents):
assert browser.evaluate_script("document.getElementsByClassName('%s')[0].innerHTML = '%s'" % (klass, contents)), \
u'Element not found or could not set HTML content'
@then(parse('field "{name}" should have the value "{value}"'))
def field_has_value(browser, name, value):
el = browser.find_by_xpath(
("//*[@id='%(name)s']|"
"//*[@name='%(name)s']") % {'name': name})
assert el, u'Element not found'
assert el.first.value == value, "Values do not match, expected %s but got %s" % (value, el.first.value)
@then(parse('"{name}" should be enabled'))
def is_enabled(browser, name):
el = browser.find_by_xpath(
("//*[@id='%(name)s']|"
"//*[@name='%(name)s']") % {'name': name})
assert el, u'Element not found'
assert el.first._element.is_enabled()
@then(parse('"{name}" should be disabled'))
@then(parse('"{name}" should not be enabled'))
def is_disabled(browser, name):
el = browser.find_by_xpath(
("//*[@id='%(name)s']|"
"//*[@name='%(name)s']") % {'name': name})
assert el, u'Element not found'
assert not el.first._element.is_enabled()
@then(parse('field "{name}" should be valid'))
def field_is_valid(browser, name):
assert browser.find_by_name(name), u'Element not found'
assert browser.evaluate_script("document.getElementsByName('%s')[0].checkValidity()" % name), \
'Field is invalid'
@then(parse('field "{name}" should be invalid'))
@then(parse('field "{name}" should not be valid'))
def field_is_invalid(browser, name):
assert browser.find_by_name(name), u'Element not found'
assert not browser.evaluate_script("document.getElementsByName('%s')[0].checkValidity()" % name), \
'Field is valid'
@then(parse('field "{name}" should be required'))
def field_is_required(browser, name):
assert browser.find_by_name(name), u'Element not found'
assert browser.evaluate_script("document.getElementsByName('%s')[0].getAttribute('required')" % name), \
'Field is not required'
@then(parse('field "{name}" should not be required'))
def field_is_not_required(browser, name):
assert browser.find_by_name(name), u'Element not found'
assert not browser.evaluate_script("document.getElementsByName('%s')[0].getAttribute('required')" % name), \
'Field is required'
@when(parse('I enter "{text}" to the alert'))
def set_alert_text(browser, text):
alert = browser.driver.switch_to_alert()
assert alert, u'Alert not found'
alert.send_keys(text)
@when(parse('I accept the alert'))
def accept_alert(browser):
alert = browser.driver.switch_to_alert()
assert alert, u'Alert not found'
alert.accept()
@when(parse('I dismiss the alert'))
def dismiss_alert(browser):
alert = browser.driver.switch_to_alert()
assert alert, u'Alert not found'
alert.dismiss()
####################
# links
@when(parse('I click the link to "{url}"'))
def click_link_to_url(browser, url):
browser.click_link_by_href(url)
@when(parse('I click the link to a url that contains "{url}"'))
def click_link_to_url_that_contains(browser, url):
browser.click_link_by_partial_href(url)
@when(parse('I click the link with text "{text}"'))
def click_link_with_text(browser, text):
browser.click_link_by_text(text)
@when(parse('I click the link with text that contains "{text}"'))
def click_link_with_text_that_contains(browser, text):
anchors = browser.find_link_by_partial_text(text)
assert anchors, 'Link not found'
anchors[0].click()
####################
# url
@given(parse('the base url "{url}"'))
def base_url(url):
return url
@when(parse('I visit "{url}"'))
@when(parse('I go to "{url}"'))
def when_i_visit_url(browser, base_url, url):
full_url = urljoin(base_url, url)
browser.visit(full_url)
@then(parse('the browser\'s URL should be "{url}"'))
def the_browser_url_should_be(browser, base_url, url):
full_url = urljoin(base_url, url)
assert browser.url.strip() == full_url
@then(parse('the browser\'s URL should contain "{text}"'))
def the_browser_url_should_contain(browser, text):
assert text in browser.url
@then(parse('the browser\'s URL should not contain "{text}"'))
def the_browser_url_should_not_contain(browser, text):
assert text not in browser.url
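# Illustrative feature-file scenario (not part of the test suite) showing how the
# step definitions in this module are matched; the URL and search term below are
# hypothetical:
#
#   Given the base url "http://localhost:6543"
#   When I visit "/search/"
#   And I press "Search"
#   Then the browser's URL should contain "searchTerm"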
|
philiptzou/clincoded
|
src/clincoded/tests/features/browsersteps.py
|
Python
|
mit
| 12,588
|
[
"VisIt"
] |
741ba15698fc177300b4d6c3a681262c80f6ad802931b18e95521aa5e35d8035
|
# ----------------------------------------------------------------------
# Copyright (c) 2016, The Regents of the University of California All
# rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of The Regents of the University of California
# nor the names of its contributors may be used to endorse or
# promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE
# UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
# ----------------------------------------------------------------------
# Filename: Specification.py
# Version: 0.1
# Description: An object encapsulating the specification files board_spec.xml,
# which is used by Altera's OpenCL SDK, and board_specification.xml, which is
# used by Tinker's TCL Scripts
# Author: Dustin Richmond
import xml.etree.ElementTree as ET
import Board, Description
from Tinker import check_path, prettify
class Specification(ET.ElementTree):
__C_FILENAME_MAP = {True: "board_specification.xml",
False: "board_spec.xml"}
# TODO: Do we pass the name in here? Or get it from Tinker using b & d
def __init__(self, t, b, d, verbose):
"""Construct a Specification Object
Arguments:
t -- A Tinker object with parameters for board generation
d -- A Description object that describes the user
settings of a custom board
b -- A Board object that describes the interfaces and
custom IP on a development board
"""
self.__t = t
self.__b = b
self.__d = d
self.__version = t.get_version()
self.__verbose = verbose
r = self.__get_root()
e = self.__b.get_compile_element()
r.append(e)
e = self.__get_device_element()
r.append(e)
ms = self.__d.get_global_mem_elements(self.__version, self.__verbose)
r.extend(ms)
e = self.__get_interfaces_element()
r.append(e)
e = self.__get_host_element()
r.append(e)
self.__filename = self.__C_FILENAME_MAP[self.__verbose]
super(Specification,self).__init__(element=r)
def __get_root(self):
n = self.__t.get_name(self.__b, self.__d)
if(self.__version == 14.1):
r = ET.Element("board", attrib={"version": "0.9","name":n})
else:
r = ET.Element("board", attrib={"version": str(self.__version),
"name":n})
if(self.__verbose):
r.set("file", self.__t.get_name_skel_xml())
return r
def __get_device_element(self):
e = ET.Element("device", attrib={"device_model":self.__b["model"]})
ur = ET.Element("used_resources")
e.append(ur)
r = self.__d.get_resources(self.__version, self.__verbose)
for rt,rc in r.iteritems():
re = ET.Element(rt, attrib={"num":str(rc)})
ur.append(re)
return e
def __get_interfaces_element(self):
v = self.__t.get_version()
e = ET.Element("interfaces")
ifs = self.__d.get_interface_elements(self.__version, self.__verbose)
for i in ifs:
e.append(i)
return e
def __get_host_element(self):
e = ET.Element("host")
hes = self.__d.get_host_elements(self.__version, self.__verbose)
for he in hes:
e.append(he)
return e
def write(self, p):
"""Write an XML representation of this Specification object
to a file.
Arguments:
p - file destination path
"""
check_path(p)
p += "/"+self.__filename
s = prettify(self.getroot())
fp = open(p, "w")
fp.write(s)
fp.close()
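# Usage sketch (hypothetical -- the exact constructor arguments for Tinker, Board
# and Description are defined elsewhere in this package):
#
#   t = Tinker(...)
#   b = Board(...)
#   d = Description(...)
#   s = Specification(t, b, d, verbose=False)  # verbose=False selects board_spec.xml
#   s.write("/path/to/output/dir")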
|
drichmond/tinker
|
python/Specification.py
|
Python
|
bsd-3-clause
| 5,116
|
[
"TINKER"
] |
e300156835b960f47fc40c4766ca86ac830813610a1440da8e748b0da6bc25e5
|
"""
Dashboard view and supporting methods
"""
import datetime
import logging
from collections import defaultdict
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import ensure_csrf_cookie
from edx_django_utils import monitoring as monitoring_utils
from edx_django_utils.plugins import get_plugins_view_context
from edx_toggles.toggles import LegacyWaffleFlag, LegacyWaffleFlagNamespace
from opaque_keys.edx.keys import CourseKey
from pytz import UTC
from lms.djangoapps.bulk_email.api import is_bulk_email_feature_enabled
from lms.djangoapps.bulk_email.models import Optout
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.edxmako.shortcuts import render_to_response, render_to_string
from common.djangoapps.entitlements.models import CourseEntitlement
from lms.djangoapps.commerce.utils import EcommerceService
from lms.djangoapps.courseware.access import has_access
from lms.djangoapps.experiments.utils import get_dashboard_course_info, get_experiment_user_metadata_context
from lms.djangoapps.verify_student.services import IDVerificationService
from openedx.core.djangoapps.catalog.utils import (
get_programs,
get_pseudo_session_for_entitlement,
get_visible_sessions_for_entitlement
)
from openedx.core.djangoapps.credit.email_utils import get_credit_provider_attribute_values, make_providers_strings
from openedx.core.djangoapps.plugins.constants import ProjectType
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangoapps.programs.utils import ProgramDataExtender, ProgramProgressMeter
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.user_api.accounts.utils import is_secondary_email_feature_enabled
from openedx.core.djangoapps.util.maintenance_banner import add_maintenance_banner
from openedx.core.djangolib.markup import HTML, Text
from openedx.features.enterprise_support.api import (
get_dashboard_consent_notification,
get_enterprise_learner_portal_enabled_message
)
from common.djangoapps.student.api import COURSE_DASHBOARD_PLUGIN_VIEW_NAME
from common.djangoapps.student.helpers import cert_info, check_verify_status_by_course, get_resume_urls_for_enrollments
from common.djangoapps.student.models import (
AccountRecovery,
CourseEnrollment,
CourseEnrollmentAttribute,
DashboardConfiguration,
PendingSecondaryEmailChange,
UserProfile
)
from common.djangoapps.util.milestones_helpers import get_pre_requisite_courses_not_completed
from xmodule.modulestore.django import modulestore
log = logging.getLogger("edx.student")
experiments_namespace = LegacyWaffleFlagNamespace(name='student.experiments')
def get_org_black_and_whitelist_for_site():
"""
Returns the org blacklist and whitelist for the current site.
Returns:
(org_whitelist, org_blacklist): A tuple of lists of orgs that serve as
either a blacklist or a whitelist of orgs for the current site. The
whitelist takes precedence, and the blacklist is used if the
whitelist is None.
"""
# Default blacklist is empty.
org_blacklist = None
# Whitelist the orgs configured for the current site. Each site outside
# of edx.org has a list of orgs associated with its configuration.
org_whitelist = configuration_helpers.get_current_site_orgs()
if not org_whitelist:
# If there is no whitelist, the blacklist will include all orgs that
# have been configured for any other sites. This applies to edx.org,
# where it is easier to blacklist all other orgs.
org_blacklist = configuration_helpers.get_all_orgs()
return org_whitelist, org_blacklist
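# Illustrative return values (hypothetical org names):
#   site configured with orgs ['FooX']            -> (['FooX'], None)
#   site with no configured orgs (e.g. edx.org)   -> (None, [orgs of all other sites])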
def _get_recently_enrolled_courses(course_enrollments):
"""
Given a list of enrollments, filter out all but recent enrollments.
Args:
course_enrollments (list[CourseEnrollment]): A list of course enrollments.
Returns:
list[CourseEnrollment]: A list of recent course enrollments.
"""
seconds = DashboardConfiguration.current().recent_enrollment_time_delta
time_delta = (datetime.datetime.now(UTC) - datetime.timedelta(seconds=seconds))
return [
enrollment for enrollment in course_enrollments
# If the enrollment has no created date, we are explicitly excluding the course
# from the list of recent enrollments.
if enrollment.is_active and enrollment.created > time_delta
]
def _create_recent_enrollment_message(course_enrollments, course_modes): # lint-amnesty, pylint: disable=unused-argument
"""
Builds a recent course enrollment message.
Constructs a new message template based on any recent course enrollments
for the student.
Args:
course_enrollments (list[CourseEnrollment]): a list of course enrollments.
course_modes (dict): Mapping of course ID's to course mode dictionaries.
Returns:
A string representing the HTML message output from the message template.
None if there are no recently enrolled courses.
"""
recently_enrolled_courses = _get_recently_enrolled_courses(course_enrollments)
if recently_enrolled_courses:
enrollments_count = len(recently_enrolled_courses)
course_name_separator = ', '
        # If exactly two courses were recently enrolled, join their names with 'and'.
if enrollments_count == 2:
course_name_separator = _(' and ')
course_names = course_name_separator.join(
[enrollment.course_overview.display_name for enrollment in recently_enrolled_courses]
)
platform_name = configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
return render_to_string(
'enrollment/course_enrollment_message.html',
{
'course_names': course_names,
'enrollments_count': enrollments_count,
'platform_name': platform_name,
'course_id': recently_enrolled_courses[0].course_overview.id if enrollments_count == 1 else None
}
)
def get_course_enrollments(user, org_whitelist, org_blacklist, course_limit=None):
"""
Given a user, return a filtered set of their course enrollments.
Arguments:
user (User): the user in question.
org_whitelist (list[str]): If not None, ONLY courses of these orgs will be returned.
org_blacklist (list[str]): Courses of these orgs will be excluded.
        course_limit (int): Maximum number of courses to load for the dashboard. If None, all courses are loaded.
Returns:
generator[CourseEnrollment]: a sequence of enrollments to be displayed
on the user's dashboard.
"""
for enrollment in CourseEnrollment.enrollments_for_user_with_overviews_preload(user, course_limit):
# If the course is missing or broken, log an error and skip it.
course_overview = enrollment.course_overview
if not course_overview:
log.error(
"User %s enrolled in broken or non-existent course %s",
user.username,
enrollment.course_id
)
continue
# Filter out anything that is not in the whitelist.
if org_whitelist and course_overview.location.org not in org_whitelist:
continue
# Conversely, filter out any enrollments in the blacklist.
elif org_blacklist and course_overview.location.org in org_blacklist:
continue
# Else, include the enrollment.
else:
yield enrollment
def get_filtered_course_entitlements(user, org_whitelist, org_blacklist):
"""
Given a user, return a filtered set of their course entitlements.
Arguments:
user (User): the user in question.
org_whitelist (list[str]): If not None, ONLY entitlements of these orgs will be returned.
org_blacklist (list[str]): CourseEntitlements of these orgs will be excluded.
Returns:
generator[CourseEntitlement]: a sequence of entitlements to be displayed
on the user's dashboard.
"""
course_entitlement_available_sessions = {}
unfulfilled_entitlement_pseudo_sessions = {}
course_entitlements = list(CourseEntitlement.get_active_entitlements_for_user(user))
filtered_entitlements = []
pseudo_session = None
course_run_key = None
for course_entitlement in course_entitlements:
course_entitlement.update_expired_at()
available_runs = get_visible_sessions_for_entitlement(course_entitlement)
if not course_entitlement.enrollment_course_run:
# Unfulfilled entitlements need a mock session for metadata
pseudo_session = get_pseudo_session_for_entitlement(course_entitlement)
unfulfilled_entitlement_pseudo_sessions[str(course_entitlement.uuid)] = pseudo_session
# Check the org of the Course and filter out entitlements that are not available.
if course_entitlement.enrollment_course_run:
course_run_key = course_entitlement.enrollment_course_run.course_id
elif available_runs:
course_run_key = CourseKey.from_string(available_runs[0]['key'])
elif pseudo_session:
course_run_key = CourseKey.from_string(pseudo_session['key'])
if course_run_key:
# If there is no course_run_key at this point we will be unable to determine if it should be shown.
# Therefore it should be excluded by default.
if org_whitelist and course_run_key.org not in org_whitelist:
continue
elif org_blacklist and course_run_key.org in org_blacklist:
continue
course_entitlement_available_sessions[str(course_entitlement.uuid)] = available_runs
filtered_entitlements.append(course_entitlement)
return filtered_entitlements, course_entitlement_available_sessions, unfulfilled_entitlement_pseudo_sessions
def complete_course_mode_info(course_id, enrollment, modes=None):
"""
We would like to compute some more information from the given course modes
and the user's current enrollment
Returns the given information:
- whether to show the course upsell information
- numbers of days until they can't upsell anymore
"""
if modes is None:
modes = CourseMode.modes_for_course_dict(course_id)
mode_info = {'show_upsell': False, 'days_for_upsell': None}
# we want to know if the user is already enrolled as verified or credit and
# if verified is an option.
if CourseMode.VERIFIED in modes and enrollment.mode in CourseMode.UPSELL_TO_VERIFIED_MODES:
mode_info['show_upsell'] = True
mode_info['verified_sku'] = modes['verified'].sku
mode_info['verified_bulk_sku'] = modes['verified'].bulk_sku
# if there is an expiration date, find out how long from now it is
if modes['verified'].expiration_datetime:
today = datetime.datetime.now(UTC).date()
mode_info['days_for_upsell'] = (modes['verified'].expiration_datetime.date() - today).days
return mode_info
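# Example of the returned dict for an audit enrollment in a course whose verified mode
# expires in 10 days (SKU values below are hypothetical):
#   {'show_upsell': True, 'days_for_upsell': 10,
#    'verified_sku': 'SKU123', 'verified_bulk_sku': 'SKU123-BULK'}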
def get_verification_error_reasons_for_display(verification_error_codes):
"""
Returns the display text for the given verification error codes.
"""
verification_errors = []
verification_error_map = {
'photos_mismatched': _('Photos are mismatched'),
'id_image_missing_name': _('Name missing from ID photo'),
'id_image_missing': _('ID photo not provided'),
'id_invalid': _('ID is invalid'),
'user_image_not_clear': _('Learner photo is blurry'),
'name_mismatch': _('Name on ID does not match name on account'),
'user_image_missing': _('Learner photo not provided'),
'id_image_not_clear': _('ID photo is blurry'),
}
for error in verification_error_codes:
error_text = verification_error_map.get(error)
if error_text:
verification_errors.append(error_text)
return verification_errors
def reverification_info(statuses):
"""
    Returns reverification-related information for *all* of the user's enrollments whose
reverification status is in statuses.
Args:
statuses (list): a list of reverification statuses we want information for
example: ["must_reverify", "denied"]
Returns:
dictionary of lists: dictionary with one key per status, e.g.
dict["must_reverify"] = []
dict["must_reverify"] = [some information]
"""
reverifications = defaultdict(list)
# Sort the data by the reverification_end_date
for status in statuses:
if reverifications[status]:
reverifications[status].sort(key=lambda x: x.date)
return reverifications
def _credit_statuses(user, course_enrollments):
"""
Retrieve the status for credit courses.
    A credit course is a course for which a user can purchase
    college credit. The current flow is:
1. User becomes eligible for credit (submits verifications, passes the course, etc.)
2. User purchases credit from a particular credit provider.
3. User requests credit from the provider, usually creating an account on the provider's site.
4. The credit provider notifies us whether the user's request for credit has been accepted or rejected.
The dashboard is responsible for communicating the user's state in this flow.
Arguments:
user (User): The currently logged-in user.
course_enrollments (list[CourseEnrollment]): List of enrollments for the
user.
Returns: dict
The returned dictionary has keys that are `CourseKey`s and values that
are dictionaries with:
* eligible (bool): True if the user is eligible for credit in this course.
* deadline (datetime): The deadline for purchasing and requesting credit for this course.
* purchased (bool): Whether the user has purchased credit for this course.
* provider_name (string): The display name of the credit provider.
* provider_status_url (string): A URL the user can visit to check on their credit request status.
* request_status (string): Either "pending", "approved", or "rejected"
* error (bool): If true, an unexpected error occurred when retrieving the credit status,
so the user should contact the support team.
Example:
>>> _credit_statuses(user, course_enrollments)
{
CourseKey.from_string("edX/DemoX/Demo_Course"): {
"course_key": "edX/DemoX/Demo_Course",
"eligible": True,
"deadline": 2015-11-23 00:00:00 UTC,
"purchased": True,
"provider_name": "Hogwarts",
"provider_status_url": "http://example.com/status",
"request_status": "pending",
"error": False
}
}
"""
from openedx.core.djangoapps.credit import api as credit_api
# Feature flag off
if not settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY"):
return {}
request_status_by_course = {
request["course_key"]: request["status"]
for request in credit_api.get_credit_requests_for_user(user.username)
}
credit_enrollments = {
enrollment.course_id: enrollment
for enrollment in course_enrollments
if enrollment.mode == "credit"
}
# When a user purchases credit in a course, the user's enrollment
# mode is set to "credit" and an enrollment attribute is set
# with the ID of the credit provider. We retrieve *all* such attributes
# here to minimize the number of database queries.
purchased_credit_providers = {
attribute.enrollment.course_id: attribute.value
for attribute in CourseEnrollmentAttribute.objects.filter(
namespace="credit",
name="provider_id",
enrollment__in=list(credit_enrollments.values())
).select_related("enrollment")
}
provider_info_by_id = {
provider["id"]: provider
for provider in credit_api.get_credit_providers()
}
statuses = {}
for eligibility in credit_api.get_eligibilities_for_user(user.username):
course_key = CourseKey.from_string(str(eligibility["course_key"]))
providers_names = get_credit_provider_attribute_values(course_key, 'display_name')
status = {
"course_key": str(course_key),
"eligible": True,
"deadline": eligibility["deadline"],
"purchased": course_key in credit_enrollments,
"provider_name": make_providers_strings(providers_names),
"provider_status_url": None,
"provider_id": None,
"request_status": request_status_by_course.get(course_key),
"error": False,
}
# If the user has purchased credit, then include information about the credit
# provider from which the user purchased credit.
        # We retrieve the provider's ID from an "enrollment attribute" set on the user's
# enrollment when the user's order for credit is fulfilled by the E-Commerce service.
if status["purchased"]:
provider_id = purchased_credit_providers.get(course_key)
if provider_id is None:
status["error"] = True
log.error(
"Could not find credit provider associated with credit enrollment "
"for user %s in course %s. The user will not be able to see their "
"credit request status on the student dashboard. This attribute should "
"have been set when the user purchased credit in the course.",
user.id, course_key
)
else:
provider_info = provider_info_by_id.get(provider_id, {})
status["provider_name"] = provider_info.get("display_name")
status["provider_status_url"] = provider_info.get("status_url")
status["provider_id"] = provider_id
if not status["provider_name"] and not status["provider_status_url"]:
status["error"] = True
log.error(
"Could not find credit provider info for [%s] in [%s]. The user will not "
"be able to see their credit request status on the student dashboard.",
provider_id, provider_info_by_id
)
statuses[course_key] = status
return statuses
def show_load_all_courses_link(user, course_limit, course_enrollments):
"""
    By default the dashboard shows a limited number of courses, based on the
    course limit set in configuration.
    A link is provided at the bottom to load all courses when more are available.
"""
if course_limit is None:
return False
total_enrollments = CourseEnrollment.enrollments_for_user(user).count()
return len(course_enrollments) < total_enrollments
def get_dashboard_course_limit():
"""
    Get the dashboard course limit from configuration.
"""
course_limit = getattr(settings, 'DASHBOARD_COURSE_LIMIT', None)
return course_limit
@login_required
@ensure_csrf_cookie
@add_maintenance_banner
def student_dashboard(request): # lint-amnesty, pylint: disable=too-many-statements
"""
Provides the LMS dashboard view
TODO: This is lms specific and does not belong in common code.
Note:
        To load all courses, include the 'course_limit' parameter in the GET request.
        If it is not included, the default course limit set in configuration is used.
Arguments:
request: The request object.
Returns:
The dashboard response.
"""
user = request.user
if not UserProfile.objects.filter(user=user).exists():
return redirect(reverse('account_settings'))
platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)
enable_verified_certificates = configuration_helpers.get_value(
'ENABLE_VERIFIED_CERTIFICATES',
settings.FEATURES.get('ENABLE_VERIFIED_CERTIFICATES')
)
display_course_modes_on_dashboard = configuration_helpers.get_value(
'DISPLAY_COURSE_MODES_ON_DASHBOARD',
settings.FEATURES.get('DISPLAY_COURSE_MODES_ON_DASHBOARD', True)
)
activation_email_support_link = configuration_helpers.get_value(
'ACTIVATION_EMAIL_SUPPORT_LINK', settings.ACTIVATION_EMAIL_SUPPORT_LINK
) or settings.SUPPORT_SITE_LINK
hide_dashboard_courses_until_activated = configuration_helpers.get_value(
'HIDE_DASHBOARD_COURSES_UNTIL_ACTIVATED',
settings.FEATURES.get('HIDE_DASHBOARD_COURSES_UNTIL_ACTIVATED', False)
)
empty_dashboard_message = configuration_helpers.get_value(
'EMPTY_DASHBOARD_MESSAGE', None
)
disable_course_limit = request and 'course_limit' in request.GET
course_limit = get_dashboard_course_limit() if not disable_course_limit else None
# Get the org whitelist or the org blacklist for the current site
site_org_whitelist, site_org_blacklist = get_org_black_and_whitelist_for_site()
course_enrollments = list(get_course_enrollments(user, site_org_whitelist, site_org_blacklist, course_limit))
# Get the entitlements for the user and a mapping to all available sessions for that entitlement
# If an entitlement has no available sessions, pass through a mock course overview object
(course_entitlements,
course_entitlement_available_sessions,
unfulfilled_entitlement_pseudo_sessions) = get_filtered_course_entitlements(
user,
site_org_whitelist,
site_org_blacklist
)
# Record how many courses there are so that we can get a better
# understanding of usage patterns on prod.
monitoring_utils.accumulate('num_courses', len(course_enrollments))
# Sort the enrollment pairs by the enrollment date
course_enrollments.sort(key=lambda x: x.created, reverse=True)
# Retrieve the course modes for each course
enrolled_course_ids = [enrollment.course_id for enrollment in course_enrollments]
__, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids)
course_modes_by_course = {
course_id: {
mode.slug: mode
for mode in modes
}
for course_id, modes in unexpired_course_modes.items()
}
# Check to see if the student has recently enrolled in a course.
# If so, display a notification message confirming the enrollment.
enrollment_message = _create_recent_enrollment_message(
course_enrollments, course_modes_by_course
)
course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True)
# Display activation message
activate_account_message = ''
if not user.is_active:
activate_account_message = Text(_(
"Check your {email_start}{email}{email_end} inbox for an account activation link from {platform_name}. "
"If you need help, contact {link_start}{platform_name} Support{link_end}."
)).format(
platform_name=platform_name,
email_start=HTML("<strong>"),
email_end=HTML("</strong>"),
email=user.email,
link_start=HTML("<a target='_blank' href='{activation_email_support_link}'>").format(
activation_email_support_link=activation_email_support_link,
),
link_end=HTML("</a>"),
)
enterprise_message = get_dashboard_consent_notification(request, user, course_enrollments)
# Display a message guiding the user to their Enterprise's Learner Portal if enabled
enterprise_learner_portal_enabled_message = get_enterprise_learner_portal_enabled_message(request)
recovery_email_message = recovery_email_activation_message = None
if is_secondary_email_feature_enabled():
try:
pending_email = PendingSecondaryEmailChange.objects.get(user=user) # lint-amnesty, pylint: disable=unused-variable
except PendingSecondaryEmailChange.DoesNotExist:
try:
account_recovery_obj = AccountRecovery.objects.get(user=user) # lint-amnesty, pylint: disable=unused-variable
except AccountRecovery.DoesNotExist:
recovery_email_message = Text(
_(
"Add a recovery email to retain access when single-sign on is not available. "
"Go to {link_start}your Account Settings{link_end}.")
).format(
link_start=HTML("<a href='{account_setting_page}'>").format(
account_setting_page=reverse('account_settings'),
),
link_end=HTML("</a>")
)
else:
recovery_email_activation_message = Text(
_(
"Recovery email is not activated yet. "
"Kindly visit your email and follow the instructions to activate it."
)
)
# Disable lookup of Enterprise consent_required_course due to ENT-727
# Will re-enable after fixing WL-1315
consent_required_courses = set()
enterprise_customer_name = None
# Account activation message
account_activation_messages = [
message for message in messages.get_messages(request) if 'account-activation' in message.tags
]
# Global staff can see what courses encountered an error on their dashboard
staff_access = False
errored_courses = {}
if has_access(user, 'staff', 'global'):
# Show any courses that encountered an error on load
staff_access = True
errored_courses = modulestore().get_errored_courses()
show_courseware_links_for = {
enrollment.course_id: has_access(request.user, 'load', enrollment.course_overview)
for enrollment in course_enrollments
}
# Find programs associated with course runs being displayed. This information
# is passed in the template context to allow rendering of program-related
# information on the dashboard.
meter = ProgramProgressMeter(request.site, user, enrollments=course_enrollments)
ecommerce_service = EcommerceService()
inverted_programs = meter.invert_programs()
urls, programs_data = {}, {}
bundles_on_dashboard_flag = LegacyWaffleFlag(experiments_namespace, 'bundles_on_dashboard', __name__)
# TODO: Delete this code and the relevant HTML code after testing LEARNER-3072 is complete
if bundles_on_dashboard_flag.is_enabled() and inverted_programs and list(inverted_programs.items()):
if len(course_enrollments) < 4:
for program in inverted_programs.values():
try:
program_uuid = program[0]['uuid']
program_data = get_programs(uuid=program_uuid)
program_data = ProgramDataExtender(program_data, request.user).extend()
skus = program_data.get('skus')
checkout_page_url = ecommerce_service.get_checkout_page_url(*skus)
program_data['completeProgramURL'] = checkout_page_url + '&bundle=' + program_data.get('uuid')
programs_data[program_uuid] = program_data
except: # pylint: disable=bare-except
pass
# Construct a dictionary of course mode information
# used to render the course list. We re-use the course modes dict
# we loaded earlier to avoid hitting the database.
course_mode_info = {
enrollment.course_id: complete_course_mode_info(
enrollment.course_id, enrollment,
modes=course_modes_by_course[enrollment.course_id]
)
for enrollment in course_enrollments
}
# Determine the per-course verification status
# This is a dictionary in which the keys are course locators
# and the values are one of:
#
# VERIFY_STATUS_NEED_TO_VERIFY
# VERIFY_STATUS_SUBMITTED
# VERIFY_STATUS_APPROVED
# VERIFY_STATUS_MISSED_DEADLINE
#
# Each of which correspond to a particular message to display
# next to the course on the dashboard.
#
# If a course is not included in this dictionary,
# there is no verification messaging to display.
verify_status_by_course = check_verify_status_by_course(user, course_enrollments)
cert_statuses = {
enrollment.course_id: cert_info(request.user, enrollment.course_overview)
for enrollment in course_enrollments
}
# only show email settings for Mongo course and when bulk email is turned on
show_email_settings_for = frozenset(
enrollment.course_id for enrollment in course_enrollments if (
is_bulk_email_feature_enabled(enrollment.course_id)
)
)
# Verification Attempts
# Used to generate the "you must reverify for course x" banner
verification_status = IDVerificationService.user_status(user)
verification_errors = get_verification_error_reasons_for_display(verification_status['error'])
# Gets data for midcourse reverifications, if any are necessary or have failed
statuses = ["approved", "denied", "pending", "must_reverify"]
reverifications = reverification_info(statuses)
enrolled_courses_either_paid = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.is_paid_course()
)
# If there are *any* denied reverifications that have not been toggled off,
# we'll display the banner
denied_banner = any(item.display for item in reverifications["denied"])
# get list of courses having pre-requisites yet to be completed
courses_having_prerequisites = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.course_overview.pre_requisite_courses
)
courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites)
if 'notlive' in request.GET:
redirect_message = _("The course you are looking for does not start until {date}.").format(
date=request.GET['notlive']
)
elif 'course_closed' in request.GET:
redirect_message = _("The course you are looking for is closed for enrollment as of {date}.").format(
date=request.GET['course_closed']
)
elif 'access_response_error' in request.GET:
# This can be populated in a generalized way with fields from access response errors
redirect_message = request.GET['access_response_error']
else:
redirect_message = ''
valid_verification_statuses = ['approved', 'must_reverify', 'pending', 'expired']
display_sidebar_on_dashboard = verification_status['status'] in valid_verification_statuses and \
verification_status['should_display']
# Filter out any course enrollment course cards that are associated with fulfilled entitlements
for entitlement in [e for e in course_entitlements if e.enrollment_course_run is not None]:
course_enrollments = [
enr for enr in course_enrollments if entitlement.enrollment_course_run.course_id != enr.course_id
]
context = {
'urls': urls,
'programs_data': programs_data,
'enterprise_message': enterprise_message,
'consent_required_courses': consent_required_courses,
'enterprise_customer_name': enterprise_customer_name,
'enrollment_message': enrollment_message,
'redirect_message': Text(redirect_message),
'account_activation_messages': account_activation_messages,
'activate_account_message': activate_account_message,
'course_enrollments': course_enrollments,
'course_entitlements': course_entitlements,
'course_entitlement_available_sessions': course_entitlement_available_sessions,
'unfulfilled_entitlement_pseudo_sessions': unfulfilled_entitlement_pseudo_sessions,
'course_optouts': course_optouts,
'staff_access': staff_access,
'errored_courses': errored_courses,
'show_courseware_links_for': show_courseware_links_for,
'all_course_modes': course_mode_info,
'cert_statuses': cert_statuses,
'credit_statuses': _credit_statuses(user, course_enrollments),
'show_email_settings_for': show_email_settings_for,
'reverifications': reverifications,
'verification_display': verification_status['should_display'],
'verification_status': verification_status['status'],
'verification_expiry': verification_status['verification_expiry'],
'verification_status_by_course': verify_status_by_course,
'verification_errors': verification_errors,
'denied_banner': denied_banner,
'billing_email': settings.PAYMENT_SUPPORT_EMAIL,
'user': user,
'logout_url': reverse('logout'),
'platform_name': platform_name,
'enrolled_courses_either_paid': enrolled_courses_either_paid,
'provider_states': [],
'courses_requirements_not_met': courses_requirements_not_met,
'nav_hidden': True,
'inverted_programs': inverted_programs,
'show_program_listing': ProgramsApiConfig.is_enabled(),
'show_dashboard_tabs': True,
'disable_courseware_js': True,
'display_course_modes_on_dashboard': enable_verified_certificates and display_course_modes_on_dashboard,
'display_sidebar_on_dashboard': display_sidebar_on_dashboard,
'display_sidebar_account_activation_message': not(user.is_active or hide_dashboard_courses_until_activated),
'display_dashboard_courses': (user.is_active or not hide_dashboard_courses_until_activated),
'empty_dashboard_message': empty_dashboard_message,
'recovery_email_message': recovery_email_message,
'recovery_email_activation_message': recovery_email_activation_message,
'enterprise_learner_portal_enabled_message': enterprise_learner_portal_enabled_message,
'show_load_all_courses_link': show_load_all_courses_link(user, course_limit, course_enrollments),
# TODO START: clean up as part of REVEM-199 (START)
'course_info': get_dashboard_course_info(user, course_enrollments),
# TODO START: clean up as part of REVEM-199 (END)
}
context_from_plugins = get_plugins_view_context(
ProjectType.LMS,
COURSE_DASHBOARD_PLUGIN_VIEW_NAME,
context
)
context.update(context_from_plugins)
course = None
context.update(
get_experiment_user_metadata_context(
course,
user,
)
)
if ecommerce_service.is_enabled(request.user):
context.update({
'use_ecommerce_payment_flow': True,
'ecommerce_payment_page': ecommerce_service.payment_page_url(),
})
# Gather urls for course card resume buttons.
resume_button_urls = ['' for entitlement in course_entitlements]
for url in get_resume_urls_for_enrollments(user, course_enrollments).values():
resume_button_urls.append(url)
# There must be enough urls for dashboard.html. Template creates course
# cards for "enrollments + entitlements".
context.update({
'resume_button_urls': resume_button_urls
})
return render_to_response('dashboard.html', context)
|
eduNEXT/edunext-platform
|
common/djangoapps/student/views/dashboard.py
|
Python
|
agpl-3.0
| 35,762
|
[
"VisIt"
] |
475e127c75e993a3363c1c58854b28f33e2d0c9f8c5e50ef5a6b3f4e17afc7f4
|
# noqa
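# Word list in the style of the EFF passphrase word lists. A minimal usage sketch,
# assuming the list is consumed for random passphrase generation (this usage is an
# assumption, not taken from the surrounding code):
#   import secrets
#   passphrase = " ".join(secrets.choice(EFF_WORDS) for _ in range(6))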
EFF_WORDS = [
"abacus",
"abdomen",
"abdominal",
"abide",
"abiding",
"ability",
"ablaze",
"able",
"abnormal",
"abrasion",
"abrasive",
"abreast",
"abridge",
"abroad",
"abruptly",
"absence",
"absentee",
"absently",
"absinthe",
"absolute",
"absolve",
"abstain",
"abstract",
"absurd",
"accent",
"acclaim",
"acclimate",
"accompany",
"account",
"accuracy",
"accurate",
"accustom",
"acetone",
"achiness",
"aching",
"acid",
"acorn",
"acquaint",
"acquire",
"acre",
"acrobat",
"acronym",
"acting",
"action",
"activate",
"activator",
"active",
"activism",
"activist",
"activity",
"actress",
"acts",
"acutely",
"acuteness",
"aeration",
"aerobics",
"aerosol",
"aerospace",
"afar",
"affair",
"affected",
"affecting",
"affection",
"affidavit",
"affiliate",
"affirm",
"affix",
"afflicted",
"affluent",
"afford",
"affront",
"aflame",
"afloat",
"aflutter",
"afoot",
"afraid",
"afterglow",
"afterlife",
"aftermath",
"aftermost",
"afternoon",
"aged",
"ageless",
"agency",
"agenda",
"agent",
"aggregate",
"aghast",
"agile",
"agility",
"aging",
"agnostic",
"agonize",
"agonizing",
"agony",
"agreeable",
"agreeably",
"agreed",
"agreeing",
"agreement",
"aground",
"ahead",
"ahoy",
"aide",
"aids",
"aim",
"ajar",
"alabaster",
"alarm",
"albatross",
"album",
"alfalfa",
"algebra",
"algorithm",
"alias",
"alibi",
"alienable",
"alienate",
"aliens",
"alike",
"alive",
"alkaline",
"alkalize",
"almanac",
"almighty",
"almost",
"aloe",
"aloft",
"aloha",
"alone",
"alongside",
"aloof",
"alphabet",
"alright",
"although",
"altitude",
"alto",
"aluminum",
"alumni",
"always",
"amaretto",
"amaze",
"amazingly",
"amber",
"ambiance",
"ambiguity",
"ambiguous",
"ambition",
"ambitious",
"ambulance",
"ambush",
"amendable",
"amendment",
"amends",
"amenity",
"amiable",
"amicably",
"amid",
"amigo",
"amino",
"amiss",
"ammonia",
"ammonium",
"amnesty",
"amniotic",
"among",
"amount",
"amperage",
"ample",
"amplifier",
"amplify",
"amply",
"amuck",
"amulet",
"amusable",
"amused",
"amusement",
"amuser",
"amusing",
"anaconda",
"anaerobic",
"anagram",
"anatomist",
"anatomy",
"anchor",
"anchovy",
"ancient",
"android",
"anemia",
"anemic",
"aneurism",
"anew",
"angelfish",
"angelic",
"anger",
"angled",
"angler",
"angles",
"angling",
"angrily",
"angriness",
"anguished",
"angular",
"animal",
"animate",
"animating",
"animation",
"animator",
"anime",
"animosity",
"ankle",
"annex",
"annotate",
"announcer",
"annoying",
"annually",
"annuity",
"anointer",
"another",
"answering",
"antacid",
"antarctic",
"anteater",
"antelope",
"antennae",
"anthem",
"anthill",
"anthology",
"antibody",
"antics",
"antidote",
"antihero",
"antiquely",
"antiques",
"antiquity",
"antirust",
"antitoxic",
"antitrust",
"antiviral",
"antivirus",
"antler",
"antonym",
"antsy",
"anvil",
"anybody",
"anyhow",
"anymore",
"anyone",
"anyplace",
"anything",
"anytime",
"anyway",
"anywhere",
"aorta",
"apache",
"apostle",
"appealing",
"appear",
"appease",
"appeasing",
"appendage",
"appendix",
"appetite",
"appetizer",
"applaud",
"applause",
"apple",
"appliance",
"applicant",
"applied",
"apply",
"appointee",
"appraisal",
"appraiser",
"apprehend",
"approach",
"approval",
"approve",
"apricot",
"april",
"apron",
"aptitude",
"aptly",
"aqua",
"aqueduct",
"arbitrary",
"arbitrate",
"ardently",
"area",
"arena",
"arguable",
"arguably",
"argue",
"arise",
"armadillo",
"armband",
"armchair",
"armed",
"armful",
"armhole",
"arming",
"armless",
"armoire",
"armored",
"armory",
"armrest",
"army",
"aroma",
"arose",
"around",
"arousal",
"arrange",
"array",
"arrest",
"arrival",
"arrive",
"arrogance",
"arrogant",
"arson",
"art",
"ascend",
"ascension",
"ascent",
"ascertain",
"ashamed",
"ashen",
"ashes",
"ashy",
"aside",
"askew",
"asleep",
"asparagus",
"aspect",
"aspirate",
"aspire",
"aspirin",
"astonish",
"astound",
"astride",
"astrology",
"astronaut",
"astronomy",
"astute",
"atlantic",
"atlas",
"atom",
"atonable",
"atop",
"atrium",
"atrocious",
"atrophy",
"attach",
"attain",
"attempt",
"attendant",
"attendee",
"attention",
"attentive",
"attest",
"attic",
"attire",
"attitude",
"attractor",
"attribute",
"atypical",
"auction",
"audacious",
"audacity",
"audible",
"audibly",
"audience",
"audio",
"audition",
"augmented",
"august",
"authentic",
"author",
"autism",
"autistic",
"autograph",
"automaker",
"automated",
"automatic",
"autopilot",
"available",
"avalanche",
"avatar",
"avenge",
"avenging",
"avenue",
"average",
"aversion",
"avert",
"aviation",
"aviator",
"avid",
"avoid",
"await",
"awaken",
"award",
"aware",
"awhile",
"awkward",
"awning",
"awoke",
"awry",
"axis",
"babble",
"babbling",
"babied",
"baboon",
"backache",
"backboard",
"backboned",
"backdrop",
"backed",
"backer",
"backfield",
"backfire",
"backhand",
"backing",
"backlands",
"backlash",
"backless",
"backlight",
"backlit",
"backlog",
"backpack",
"backpedal",
"backrest",
"backroom",
"backshift",
"backside",
"backslid",
"backspace",
"backspin",
"backstab",
"backstage",
"backtalk",
"backtrack",
"backup",
"backward",
"backwash",
"backwater",
"backyard",
"bacon",
"bacteria",
"bacterium",
"badass",
"badge",
"badland",
"badly",
"badness",
"baffle",
"baffling",
"bagel",
"bagful",
"baggage",
"bagged",
"baggie",
"bagginess",
"bagging",
"baggy",
"bagpipe",
"baguette",
"baked",
"bakery",
"bakeshop",
"baking",
"balance",
"balancing",
"balcony",
"balmy",
"balsamic",
"bamboo",
"banana",
"banish",
"banister",
"banjo",
"bankable",
"bankbook",
"banked",
"banker",
"banking",
"banknote",
"bankroll",
"banner",
"bannister",
"banshee",
"banter",
"barbecue",
"barbed",
"barbell",
"barber",
"barcode",
"barge",
"bargraph",
"barista",
"baritone",
"barley",
"barmaid",
"barman",
"barn",
"barometer",
"barrack",
"barracuda",
"barrel",
"barrette",
"barricade",
"barrier",
"barstool",
"bartender",
"barterer",
"bash",
"basically",
"basics",
"basil",
"basin",
"basis",
"basket",
"batboy",
"batch",
"bath",
"baton",
"bats",
"battalion",
"battered",
"battering",
"battery",
"batting",
"battle",
"bauble",
"bazooka",
"blabber",
"bladder",
"blade",
"blah",
"blame",
"blaming",
"blanching",
"blandness",
"blank",
"blaspheme",
"blasphemy",
"blast",
"blatancy",
"blatantly",
"blazer",
"blazing",
"bleach",
"bleak",
"bleep",
"blemish",
"blend",
"bless",
"blighted",
"blimp",
"bling",
"blinked",
"blinker",
"blinking",
"blinks",
"blip",
"blissful",
"blitz",
"blizzard",
"bloated",
"bloating",
"blob",
"blog",
"bloomers",
"blooming",
"blooper",
"blot",
"blouse",
"blubber",
"bluff",
"bluish",
"blunderer",
"blunt",
"blurb",
"blurred",
"blurry",
"blurt",
"blush",
"blustery",
"boaster",
"boastful",
"boasting",
"boat",
"bobbed",
"bobbing",
"bobble",
"bobcat",
"bobsled",
"bobtail",
"bodacious",
"body",
"bogged",
"boggle",
"bogus",
"boil",
"bok",
"bolster",
"bolt",
"bonanza",
"bonded",
"bonding",
"bondless",
"boned",
"bonehead",
"boneless",
"bonelike",
"boney",
"bonfire",
"bonnet",
"bonsai",
"bonus",
"bony",
"boogeyman",
"boogieman",
"book",
"boondocks",
"booted",
"booth",
"bootie",
"booting",
"bootlace",
"bootleg",
"boots",
"boozy",
"borax",
"boring",
"borough",
"borrower",
"borrowing",
"boss",
"botanical",
"botanist",
"botany",
"botch",
"both",
"bottle",
"bottling",
"bottom",
"bounce",
"bouncing",
"bouncy",
"bounding",
"boundless",
"bountiful",
"bovine",
"boxcar",
"boxer",
"boxing",
"boxlike",
"boxy",
"breach",
"breath",
"breeches",
"breeching",
"breeder",
"breeding",
"breeze",
"breezy",
"brethren",
"brewery",
"brewing",
"briar",
"bribe",
"brick",
"bride",
"bridged",
"brigade",
"bright",
"brilliant",
"brim",
"bring",
"brink",
"brisket",
"briskly",
"briskness",
"bristle",
"brittle",
"broadband",
"broadcast",
"broaden",
"broadly",
"broadness",
"broadside",
"broadways",
"broiler",
"broiling",
"broken",
"broker",
"bronchial",
"bronco",
"bronze",
"bronzing",
"brook",
"broom",
"brought",
"browbeat",
"brownnose",
"browse",
"browsing",
"bruising",
"brunch",
"brunette",
"brunt",
"brush",
"brussels",
"brute",
"brutishly",
"bubble",
"bubbling",
"bubbly",
"buccaneer",
"bucked",
"bucket",
"buckle",
"buckshot",
"buckskin",
"bucktooth",
"buckwheat",
"buddhism",
"buddhist",
"budding",
"buddy",
"budget",
"buffalo",
"buffed",
"buffer",
"buffing",
"buffoon",
"buggy",
"bulb",
"bulge",
"bulginess",
"bulgur",
"bulk",
"bulldog",
"bulldozer",
"bullfight",
"bullfrog",
"bullhorn",
"bullion",
"bullish",
"bullpen",
"bullring",
"bullseye",
"bullwhip",
"bully",
"bunch",
"bundle",
"bungee",
"bunion",
"bunkbed",
"bunkhouse",
"bunkmate",
"bunny",
"bunt",
"busboy",
"bush",
"busily",
"busload",
"bust",
"busybody",
"buzz",
"cabana",
"cabbage",
"cabbie",
"cabdriver",
"cable",
"caboose",
"cache",
"cackle",
"cacti",
"cactus",
"caddie",
"caddy",
"cadet",
"cadillac",
"cadmium",
"cage",
"cahoots",
"cake",
"calamari",
"calamity",
"calcium",
"calculate",
"calculus",
"caliber",
"calibrate",
"calm",
"caloric",
"calorie",
"calzone",
"camcorder",
"cameo",
"camera",
"camisole",
"camper",
"campfire",
"camping",
"campsite",
"campus",
"canal",
"canary",
"cancel",
"candied",
"candle",
"candy",
"cane",
"canine",
"canister",
"cannabis",
"canned",
"canning",
"cannon",
"cannot",
"canola",
"canon",
"canopener",
"canopy",
"canteen",
"canyon",
"capable",
"capably",
"capacity",
"cape",
"capillary",
"capital",
"capitol",
"capped",
"capricorn",
"capsize",
"capsule",
"caption",
"captivate",
"captive",
"captivity",
"capture",
"caramel",
"carat",
"caravan",
"carbon",
"cardboard",
"carded",
"cardiac",
"cardigan",
"cardinal",
"cardstock",
"carefully",
"caregiver",
"careless",
"caress",
"caretaker",
"cargo",
"caring",
"carless",
"carload",
"carmaker",
"carnage",
"carnation",
"carnival",
"carnivore",
"carol",
"carpenter",
"carpentry",
"carpool",
"carport",
"carried",
"carrot",
"carrousel",
"carry",
"cartel",
"cartload",
"carton",
"cartoon",
"cartridge",
"cartwheel",
"carve",
"carving",
"carwash",
"cascade",
"case",
"cash",
"casing",
"casino",
"casket",
"cassette",
"casually",
"casualty",
"catacomb",
"catalog",
"catalyst",
"catalyze",
"catapult",
"cataract",
"catatonic",
"catcall",
"catchable",
"catcher",
"catching",
"catchy",
"caterer",
"catering",
"catfight",
"catfish",
"cathedral",
"cathouse",
"catlike",
"catnap",
"catnip",
"catsup",
"cattail",
"cattishly",
"cattle",
"catty",
"catwalk",
"caucasian",
"caucus",
"causal",
"causation",
"cause",
"causing",
"cauterize",
"caution",
"cautious",
"cavalier",
"cavalry",
"caviar",
"cavity",
"cedar",
"celery",
"celestial",
"celibacy",
"celibate",
"celtic",
"cement",
"census",
"ceramics",
"ceremony",
"certainly",
"certainty",
"certified",
"certify",
"cesarean",
"cesspool",
"chafe",
"chaffing",
"chain",
"chair",
"chalice",
"challenge",
"chamber",
"chamomile",
"champion",
"chance",
"change",
"channel",
"chant",
"chaos",
"chaperone",
"chaplain",
"chapped",
"chaps",
"chapter",
"character",
"charbroil",
"charcoal",
"charger",
"charging",
"chariot",
"charity",
"charm",
"charred",
"charter",
"charting",
"chase",
"chasing",
"chaste",
"chastise",
"chastity",
"chatroom",
"chatter",
"chatting",
"chatty",
"cheating",
"cheddar",
"cheek",
"cheer",
"cheese",
"cheesy",
"chef",
"chemicals",
"chemist",
"chemo",
"cherisher",
"cherub",
"chess",
"chest",
"chevron",
"chevy",
"chewable",
"chewer",
"chewing",
"chewy",
"chief",
"chihuahua",
"childcare",
"childhood",
"childish",
"childless",
"childlike",
"chili",
"chill",
"chimp",
"chip",
"chirping",
"chirpy",
"chitchat",
"chivalry",
"chive",
"chloride",
"chlorine",
"choice",
"chokehold",
"choking",
"chomp",
"chooser",
"choosing",
"choosy",
"chop",
"chosen",
"chowder",
"chowtime",
"chrome",
"chubby",
"chuck",
"chug",
"chummy",
"chump",
"chunk",
"churn",
"chute",
"cider",
"cilantro",
"cinch",
"cinema",
"cinnamon",
"circle",
"circling",
"circular",
"circulate",
"circus",
"citable",
"citadel",
"citation",
"citizen",
"citric",
"citrus",
"city",
"civic",
"civil",
"clad",
"claim",
"clambake",
"clammy",
"clamor",
"clamp",
"clamshell",
"clang",
"clanking",
"clapped",
"clapper",
"clapping",
"clarify",
"clarinet",
"clarity",
"clash",
"clasp",
"class",
"clatter",
"clause",
"clavicle",
"claw",
"clay",
"clean",
"clear",
"cleat",
"cleaver",
"cleft",
"clench",
"clergyman",
"clerical",
"clerk",
"clever",
"clicker",
"client",
"climate",
"climatic",
"cling",
"clinic",
"clinking",
"clip",
"clique",
"cloak",
"clobber",
"clock",
"clone",
"cloning",
"closable",
"closure",
"clothes",
"clothing",
"cloud",
"clover",
"clubbed",
"clubbing",
"clubhouse",
"clump",
"clumsily",
"clumsy",
"clunky",
"clustered",
"clutch",
"clutter",
"coach",
"coagulant",
"coastal",
"coaster",
"coasting",
"coastland",
"coastline",
"coat",
"coauthor",
"cobalt",
"cobbler",
"cobweb",
"cocoa",
"coconut",
"cod",
"coeditor",
"coerce",
"coexist",
"coffee",
"cofounder",
"cognition",
"cognitive",
"cogwheel",
"coherence",
"coherent",
"cohesive",
"coil",
"coke",
"cola",
"cold",
"coleslaw",
"coliseum",
"collage",
"collapse",
"collar",
"collected",
"collector",
"collide",
"collie",
"collision",
"colonial",
"colonist",
"colonize",
"colony",
"colossal",
"colt",
"coma",
"come",
"comfort",
"comfy",
"comic",
"coming",
"comma",
"commence",
"commend",
"comment",
"commerce",
"commode",
"commodity",
"commodore",
"common",
"commotion",
"commute",
"commuting",
"compacted",
"compacter",
"compactly",
"compactor",
"companion",
"company",
"compare",
"compel",
"compile",
"comply",
"component",
"composed",
"composer",
"composite",
"compost",
"composure",
"compound",
"compress",
"comprised",
"computer",
"computing",
"comrade",
"concave",
"conceal",
"conceded",
"concept",
"concerned",
"concert",
"conch",
"concierge",
"concise",
"conclude",
"concrete",
"concur",
"condense",
"condiment",
"condition",
"condone",
"conducive",
"conductor",
"conduit",
"cone",
"confess",
"confetti",
"confidant",
"confident",
"confider",
"confiding",
"configure",
"confined",
"confining",
"confirm",
"conflict",
"conform",
"confound",
"confront",
"confused",
"confusing",
"confusion",
"congenial",
"congested",
"congrats",
"congress",
"conical",
"conjoined",
"conjure",
"conjuror",
"connected",
"connector",
"consensus",
"consent",
"console",
"consoling",
"consonant",
"constable",
"constant",
"constrain",
"constrict",
"construct",
"consult",
"consumer",
"consuming",
"contact",
"container",
"contempt",
"contend",
"contented",
"contently",
"contents",
"contest",
"context",
"contort",
"contour",
"contrite",
"control",
"contusion",
"convene",
"convent",
"copartner",
"cope",
"copied",
"copier",
"copilot",
"coping",
"copious",
"copper",
"copy",
"coral",
"cork",
"cornball",
"cornbread",
"corncob",
"cornea",
"corned",
"corner",
"cornfield",
"cornflake",
"cornhusk",
"cornmeal",
"cornstalk",
"corny",
"coronary",
"coroner",
"corporal",
"corporate",
"corral",
"correct",
"corridor",
"corrode",
"corroding",
"corrosive",
"corsage",
"corset",
"cortex",
"cosigner",
"cosmetics",
"cosmic",
"cosmos",
"cosponsor",
"cost",
"cottage",
"cotton",
"couch",
"cough",
"could",
"countable",
"countdown",
"counting",
"countless",
"country",
"county",
"courier",
"covenant",
"cover",
"coveted",
"coveting",
"coyness",
"cozily",
"coziness",
"cozy",
"crabbing",
"crabgrass",
"crablike",
"crabmeat",
"cradle",
"cradling",
"crafter",
"craftily",
"craftsman",
"craftwork",
"crafty",
"cramp",
"cranberry",
"crane",
"cranial",
"cranium",
"crank",
"crate",
"crave",
"craving",
"crawfish",
"crawlers",
"crawling",
"crayfish",
"crayon",
"crazed",
"crazily",
"craziness",
"crazy",
"creamed",
"creamer",
"creamlike",
"crease",
"creasing",
"creatable",
"create",
"creation",
"creative",
"creature",
"credible",
"credibly",
"credit",
"creed",
"creme",
"creole",
"crepe",
"crept",
"crescent",
"crested",
"cresting",
"crestless",
"crevice",
"crewless",
"crewman",
"crewmate",
"crib",
"cricket",
"cried",
"crier",
"crimp",
"crimson",
"cringe",
"cringing",
"crinkle",
"crinkly",
"crisped",
"crisping",
"crisply",
"crispness",
"crispy",
"criteria",
"critter",
"croak",
"crock",
"crook",
"croon",
"crop",
"cross",
"crouch",
"crouton",
"crowbar",
"crowd",
"crown",
"crucial",
"crudely",
"crudeness",
"cruelly",
"cruelness",
"cruelty",
"crumb",
"crummiest",
"crummy",
"crumpet",
"crumpled",
"cruncher",
"crunching",
"crunchy",
"crusader",
"crushable",
"crushed",
"crusher",
"crushing",
"crust",
"crux",
"crying",
"cryptic",
"crystal",
"cubbyhole",
"cube",
"cubical",
"cubicle",
"cucumber",
"cuddle",
"cuddly",
"cufflink",
"culinary",
"culminate",
"culpable",
"culprit",
"cultivate",
"cultural",
"culture",
"cupbearer",
"cupcake",
"cupid",
"cupped",
"cupping",
"curable",
"curator",
"curdle",
"cure",
"curfew",
"curing",
"curled",
"curler",
"curliness",
"curling",
"curly",
"curry",
"curse",
"cursive",
"cursor",
"curtain",
"curtly",
"curtsy",
"curvature",
"curve",
"curvy",
"cushy",
"cusp",
"cussed",
"custard",
"custodian",
"custody",
"customary",
"customer",
"customize",
"customs",
"cut",
"cycle",
"cyclic",
"cycling",
"cyclist",
"cylinder",
"cymbal",
"cytoplasm",
"cytoplast",
"dab",
"dad",
"daffodil",
"dagger",
"daily",
"daintily",
"dainty",
"dairy",
"daisy",
"dallying",
"dance",
"dancing",
"dandelion",
"dander",
"dandruff",
"dandy",
"danger",
"dangle",
"dangling",
"daredevil",
"dares",
"daringly",
"darkened",
"darkening",
"darkish",
"darkness",
"darkroom",
"darling",
"darn",
"dart",
"darwinism",
"dash",
"dastardly",
"data",
"datebook",
"dating",
"daughter",
"daunting",
"dawdler",
"dawn",
"daybed",
"daybreak",
"daycare",
"daydream",
"daylight",
"daylong",
"dayroom",
"daytime",
"dazzler",
"dazzling",
"deacon",
"deafening",
"deafness",
"dealer",
"dealing",
"dealmaker",
"dealt",
"dean",
"debatable",
"debate",
"debating",
"debit",
"debrief",
"debtless",
"debtor",
"debug",
"debunk",
"decade",
"decaf",
"decal",
"decathlon",
"decay",
"deceased",
"deceit",
"deceiver",
"deceiving",
"december",
"decency",
"decent",
"deception",
"deceptive",
"decibel",
"decidable",
"decimal",
"decimeter",
"decipher",
"deck",
"declared",
"decline",
"decode",
"decompose",
"decorated",
"decorator",
"decoy",
"decrease",
"decree",
"dedicate",
"dedicator",
"deduce",
"deduct",
"deed",
"deem",
"deepen",
"deeply",
"deepness",
"deface",
"defacing",
"defame",
"default",
"defeat",
"defection",
"defective",
"defendant",
"defender",
"defense",
"defensive",
"deferral",
"deferred",
"defiance",
"defiant",
"defile",
"defiling",
"define",
"definite",
"deflate",
"deflation",
"deflator",
"deflected",
"deflector",
"defog",
"deforest",
"defraud",
"defrost",
"deftly",
"defuse",
"defy",
"degraded",
"degrading",
"degrease",
"degree",
"dehydrate",
"deity",
"dejected",
"delay",
"delegate",
"delegator",
"delete",
"deletion",
"delicacy",
"delicate",
"delicious",
"delighted",
"delirious",
"delirium",
"deliverer",
"delivery",
"delouse",
"delta",
"deluge",
"delusion",
"deluxe",
"demanding",
"demeaning",
"demeanor",
"demise",
"democracy",
"democrat",
"demote",
"demotion",
"demystify",
"denatured",
"deniable",
"denial",
"denim",
"denote",
"dense",
"density",
"dental",
"dentist",
"denture",
"deny",
"deodorant",
"deodorize",
"departed",
"departure",
"depict",
"deplete",
"depletion",
"deplored",
"deploy",
"deport",
"depose",
"depraved",
"depravity",
"deprecate",
"depress",
"deprive",
"depth",
"deputize",
"deputy",
"derail",
"deranged",
"derby",
"derived",
"desecrate",
"deserve",
"deserving",
"designate",
"designed",
"designer",
"designing",
"deskbound",
"desktop",
"deskwork",
"desolate",
"despair",
"despise",
"despite",
"destiny",
"destitute",
"destruct",
"detached",
"detail",
"detection",
"detective",
"detector",
"detention",
"detergent",
"detest",
"detonate",
"detonator",
"detoxify",
"detract",
"deuce",
"devalue",
"deviancy",
"deviant",
"deviate",
"deviation",
"deviator",
"device",
"devious",
"devotedly",
"devotee",
"devotion",
"devourer",
"devouring",
"devoutly",
"dexterity",
"dexterous",
"diabetes",
"diabetic",
"diabolic",
"diagnoses",
"diagnosis",
"diagram",
"dial",
"diameter",
"diaper",
"diaphragm",
"diary",
"dice",
"dicing",
"dictate",
"dictation",
"dictator",
"difficult",
"diffused",
"diffuser",
"diffusion",
"diffusive",
"dig",
"dilation",
"diligence",
"diligent",
"dill",
"dilute",
"dime",
"diminish",
"dimly",
"dimmed",
"dimmer",
"dimness",
"dimple",
"diner",
"dingbat",
"dinghy",
"dinginess",
"dingo",
"dingy",
"dining",
"dinner",
"diocese",
"dioxide",
"diploma",
"dipped",
"dipper",
"dipping",
"directed",
"direction",
"directive",
"directly",
"directory",
"direness",
"dirtiness",
"disabled",
"disagree",
"disallow",
"disarm",
"disarray",
"disaster",
"disband",
"disbelief",
"disburse",
"discard",
"discern",
"discharge",
"disclose",
"discolor",
"discount",
"discourse",
"discover",
"discuss",
"disdain",
"disengage",
"disfigure",
"disgrace",
"dish",
"disinfect",
"disjoin",
"disk",
"dislike",
"disliking",
"dislocate",
"dislodge",
"disloyal",
"dismantle",
"dismay",
"dismiss",
"dismount",
"disobey",
"disorder",
"disown",
"disparate",
"disparity",
"dispatch",
"dispense",
"dispersal",
"dispersed",
"disperser",
"displace",
"display",
"displease",
"disposal",
"dispose",
"disprove",
"dispute",
"disregard",
"disrupt",
"dissuade",
"distance",
"distant",
"distaste",
"distill",
"distinct",
"distort",
"distract",
"distress",
"district",
"distrust",
"ditch",
"ditto",
"ditzy",
"dividable",
"divided",
"dividend",
"dividers",
"dividing",
"divinely",
"diving",
"divinity",
"divisible",
"divisibly",
"division",
"divisive",
"divorcee",
"dizziness",
"dizzy",
"doable",
"docile",
"dock",
"doctrine",
"document",
"dodge",
"dodgy",
"doily",
"doing",
"dole",
"dollar",
"dollhouse",
"dollop",
"dolly",
"dolphin",
"domain",
"domelike",
"domestic",
"dominion",
"dominoes",
"donated",
"donation",
"donator",
"donor",
"donut",
"doodle",
"doorbell",
"doorframe",
"doorknob",
"doorman",
"doormat",
"doornail",
"doorpost",
"doorstep",
"doorstop",
"doorway",
"doozy",
"dork",
"dormitory",
"dorsal",
"dosage",
"dose",
"dotted",
"doubling",
"douche",
"dove",
"down",
"dowry",
"doze",
"drab",
"dragging",
"dragonfly",
"dragonish",
"dragster",
"drainable",
"drainage",
"drained",
"drainer",
"drainpipe",
"dramatic",
"dramatize",
"drank",
"drapery",
"drastic",
"draw",
"dreaded",
"dreadful",
"dreadlock",
"dreamboat",
"dreamily",
"dreamland",
"dreamless",
"dreamlike",
"dreamt",
"dreamy",
"drearily",
"dreary",
"drench",
"dress",
"drew",
"dribble",
"dried",
"drier",
"drift",
"driller",
"drilling",
"drinkable",
"drinking",
"dripping",
"drippy",
"drivable",
"driven",
"driver",
"driveway",
"driving",
"drizzle",
"drizzly",
"drone",
"drool",
"droop",
"drop-down",
"dropbox",
"dropkick",
"droplet",
"dropout",
"dropper",
"drove",
"drown",
"drowsily",
"drudge",
"drum",
"dry",
"dubbed",
"dubiously",
"duchess",
"duckbill",
"ducking",
"duckling",
"ducktail",
"ducky",
"duct",
"dude",
"duffel",
"dugout",
"duh",
"duke",
"duller",
"dullness",
"duly",
"dumping",
"dumpling",
"dumpster",
"duo",
"dupe",
"duplex",
"duplicate",
"duplicity",
"durable",
"durably",
"duration",
"duress",
"during",
"dusk",
"dust",
"dutiful",
"duty",
"duvet",
"dwarf",
"dweeb",
"dwelled",
"dweller",
"dwelling",
"dwindle",
"dwindling",
"dynamic",
"dynamite",
"dynasty",
"dyslexia",
"dyslexic",
"each",
"eagle",
"earache",
"eardrum",
"earflap",
"earful",
"earlobe",
"early",
"earmark",
"earmuff",
"earphone",
"earpiece",
"earplugs",
"earring",
"earshot",
"earthen",
"earthlike",
"earthling",
"earthly",
"earthworm",
"earthy",
"earwig",
"easeful",
"easel",
"easiest",
"easily",
"easiness",
"easing",
"eastbound",
"eastcoast",
"easter",
"eastward",
"eatable",
"eaten",
"eatery",
"eating",
"eats",
"ebay",
"ebony",
"ebook",
"ecard",
"eccentric",
"echo",
"eclair",
"eclipse",
"ecologist",
"ecology",
"economic",
"economist",
"economy",
"ecosphere",
"ecosystem",
"edge",
"edginess",
"edging",
"edgy",
"edition",
"editor",
"educated",
"education",
"educator",
"eel",
"effective",
"effects",
"efficient",
"effort",
"eggbeater",
"egging",
"eggnog",
"eggplant",
"eggshell",
"egomaniac",
"egotism",
"egotistic",
"either",
"eject",
"elaborate",
"elastic",
"elated",
"elbow",
"eldercare",
"elderly",
"eldest",
"electable",
"election",
"elective",
"elephant",
"elevate",
"elevating",
"elevation",
"elevator",
"eleven",
"elf",
"eligible",
"eligibly",
"eliminate",
"elite",
"elitism",
"elixir",
"elk",
"ellipse",
"elliptic",
"elm",
"elongated",
"elope",
"eloquence",
"eloquent",
"elsewhere",
"elude",
"elusive",
"elves",
"email",
"embargo",
"embark",
"embassy",
"embattled",
"embellish",
"ember",
"embezzle",
"emblaze",
"emblem",
"embody",
"embolism",
"emboss",
"embroider",
"emcee",
"emerald",
"emergency",
"emission",
"emit",
"emote",
"emoticon",
"emotion",
"empathic",
"empathy",
"emperor",
"emphases",
"emphasis",
"emphasize",
"emphatic",
"empirical",
"employed",
"employee",
"employer",
"emporium",
"empower",
"emptier",
"emptiness",
"empty",
"emu",
"enable",
"enactment",
"enamel",
"enchanted",
"enchilada",
"encircle",
"enclose",
"enclosure",
"encode",
"encore",
"encounter",
"encourage",
"encroach",
"encrust",
"encrypt",
"endanger",
"endeared",
"endearing",
"ended",
"ending",
"endless",
"endnote",
"endocrine",
"endorphin",
"endorse",
"endowment",
"endpoint",
"endurable",
"endurance",
"enduring",
"energetic",
"energize",
"energy",
"enforced",
"enforcer",
"engaged",
"engaging",
"engine",
"engorge",
"engraved",
"engraver",
"engraving",
"engross",
"engulf",
"enhance",
"enigmatic",
"enjoyable",
"enjoyably",
"enjoyer",
"enjoying",
"enjoyment",
"enlarged",
"enlarging",
"enlighten",
"enlisted",
"enquirer",
"enrage",
"enrich",
"enroll",
"enslave",
"ensnare",
"ensure",
"entail",
"entangled",
"entering",
"entertain",
"enticing",
"entire",
"entitle",
"entity",
"entomb",
"entourage",
"entrap",
"entree",
"entrench",
"entrust",
"entryway",
"entwine",
"enunciate",
"envelope",
"enviable",
"enviably",
"envious",
"envision",
"envoy",
"envy",
"enzyme",
"epic",
"epidemic",
"epidermal",
"epidermis",
"epidural",
"epilepsy",
"epileptic",
"epilogue",
"epiphany",
"episode",
"equal",
"equate",
"equation",
"equator",
"equinox",
"equipment",
"equity",
"equivocal",
"eradicate",
"erasable",
"erased",
"eraser",
"erasure",
"ergonomic",
"errand",
"errant",
"erratic",
"error",
"erupt",
"escalate",
"escalator",
"escapable",
"escapade",
"escapist",
"escargot",
"eskimo",
"esophagus",
"espionage",
"espresso",
"esquire",
"essay",
"essence",
"essential",
"establish",
"estate",
"esteemed",
"estimate",
"estimator",
"estranged",
"estrogen",
"etching",
"eternal",
"eternity",
"ethanol",
"ether",
"ethically",
"ethics",
"euphemism",
"evacuate",
"evacuee",
"evade",
"evaluate",
"evaluator",
"evaporate",
"evasion",
"evasive",
"even",
"everglade",
"evergreen",
"everybody",
"everyday",
"everyone",
"evict",
"evidence",
"evident",
"evil",
"evoke",
"evolution",
"evolve",
"exact",
"exalted",
"example",
"excavate",
"excavator",
"exceeding",
"exception",
"excess",
"exchange",
"excitable",
"exciting",
"exclaim",
"exclude",
"excluding",
"exclusion",
"exclusive",
"excretion",
"excretory",
"excursion",
"excusable",
"excusably",
"excuse",
"exemplary",
"exemplify",
"exemption",
"exerciser",
"exert",
"exes",
"exfoliate",
"exhale",
"exhaust",
"exhume",
"exile",
"existing",
"exit",
"exodus",
"exonerate",
"exorcism",
"exorcist",
"expand",
"expanse",
"expansion",
"expansive",
"expectant",
"expedited",
"expediter",
"expel",
"expend",
"expenses",
"expensive",
"expert",
"expire",
"expiring",
"explain",
"expletive",
"explicit",
"explode",
"exploit",
"explore",
"exploring",
"exponent",
"exporter",
"exposable",
"expose",
"exposure",
"express",
"expulsion",
"exquisite",
"extended",
"extending",
"extent",
"extenuate",
"exterior",
"external",
"extinct",
"extortion",
"extradite",
"extras",
"extrovert",
"extrude",
"extruding",
"exuberant",
"fable",
"fabric",
"fabulous",
"facebook",
"facecloth",
"facedown",
"faceless",
"facelift",
"faceplate",
"faceted",
"facial",
"facility",
"facing",
"facsimile",
"faction",
"factoid",
"factor",
"factsheet",
"factual",
"faculty",
"fade",
"fading",
"failing",
"falcon",
"fall",
"0",
"falsify",
"fame",
"familiar",
"family",
"famine",
"famished",
"fanatic",
"fancied",
"fanciness",
"fancy",
"fanfare",
"fang",
"fanning",
"fantasize",
"fantastic",
"fantasy",
"fascism",
"fastball",
"faster",
"fasting",
"fastness",
"faucet",
"favorable",
"favorably",
"favored",
"favoring",
"favorite",
"fax",
"feast",
"federal",
"fedora",
"feeble",
"feed",
"feel",
"feisty",
"feline",
"felt-tip",
"feminine",
"feminism",
"feminist",
"feminize",
"femur",
"fence",
"fencing",
"fender",
"ferment",
"fernlike",
"ferocious",
"ferocity",
"ferret",
"ferris",
"ferry",
"fervor",
"fester",
"festival",
"festive",
"festivity",
"fetal",
"fetch",
"fever",
"fiber",
"fiction",
"fiddle",
"fiddling",
"fidelity",
"fidgeting",
"fidgety",
"fifteen",
"fifth",
"fiftieth",
"fifty",
"figment",
"figure",
"figurine",
"filing",
"filled",
"filler",
"filling",
"film",
"filter",
"filth",
"filtrate",
"finale",
"finalist",
"finalize",
"finally",
"finance",
"financial",
"finch",
"fineness",
"finer",
"finicky",
"finished",
"finisher",
"finishing",
"finite",
"finless",
"finlike",
"fiscally",
"fit",
"five",
"flaccid",
"flagman",
"flagpole",
"flagship",
"flagstick",
"flagstone",
"flail",
"flakily",
"flaky",
"flame",
"flammable",
"flanked",
"flanking",
"flannels",
"flap",
"flaring",
"flashback",
"flashbulb",
"flashcard",
"flashily",
"flashing",
"flashy",
"flask",
"flatbed",
"flatfoot",
"flatly",
"flatness",
"flatten",
"flattered",
"flatterer",
"flattery",
"flattop",
"flatware",
"flatworm",
"flavored",
"flavorful",
"flavoring",
"flaxseed",
"fled",
"fleshed",
"fleshy",
"flick",
"flier",
"flight",
"flinch",
"fling",
"flint",
"flip",
"flirt",
"float",
"flock",
"flogging",
"flop",
"floral",
"florist",
"floss",
"flounder",
"flyable",
"flyaway",
"flyer",
"flying",
"flyover",
"flypaper",
"foam",
"foe",
"fog",
"foil",
"folic",
"folk",
"follicle",
"follow",
"fondling",
"fondly",
"fondness",
"fondue",
"font",
"food",
"fool",
"footage",
"football",
"footbath",
"footboard",
"footer",
"footgear",
"foothill",
"foothold",
"footing",
"footless",
"footman",
"footnote",
"footpad",
"footpath",
"footprint",
"footrest",
"footsie",
"footsore",
"footwear",
"footwork",
"fossil",
"foster",
"founder",
"founding",
"fountain",
"fox",
"foyer",
"fraction",
"fracture",
"fragile",
"fragility",
"fragment",
"fragrance",
"fragrant",
"frail",
"frame",
"framing",
"frantic",
"fraternal",
"frayed",
"fraying",
"frays",
"freckled",
"freckles",
"freebase",
"freebee",
"freebie",
"freedom",
"freefall",
"freehand",
"freeing",
"freeload",
"freely",
"freemason",
"freeness",
"freestyle",
"freeware",
"freeway",
"freewill",
"freezable",
"freezing",
"freight",
"french",
"frenzied",
"frenzy",
"frequency",
"frequent",
"fresh",
"fretful",
"fretted",
"friction",
"friday",
"fridge",
"fried",
"friend",
"frighten",
"frightful",
"frigidity",
"frigidly",
"frill",
"fringe",
"frisbee",
"frisk",
"fritter",
"frivolous",
"frolic",
"from",
"front",
"frostbite",
"frosted",
"frostily",
"frosting",
"frostlike",
"frosty",
"froth",
"frown",
"frozen",
"fructose",
"frugality",
"frugally",
"fruit",
"frustrate",
"frying",
"gab",
"gaffe",
"gag",
"gainfully",
"gaining",
"gains",
"gala",
"gallantly",
"galleria",
"gallery",
"galley",
"gallon",
"gallows",
"gallstone",
"galore",
"galvanize",
"gambling",
"game",
"gaming",
"gamma",
"gander",
"gangly",
"gangrene",
"gangway",
"gap",
"garage",
"garbage",
"garden",
"gargle",
"garland",
"garlic",
"garment",
"garnet",
"garnish",
"garter",
"gas",
"gatherer",
"gathering",
"gating",
"gauging",
"gauntlet",
"gauze",
"gave",
"gawk",
"gazing",
"gear",
"gecko",
"geek",
"geiger",
"gem",
"gender",
"generic",
"generous",
"genetics",
"genre",
"gentile",
"gentleman",
"gently",
"gents",
"geography",
"geologic",
"geologist",
"geology",
"geometric",
"geometry",
"geranium",
"gerbil",
"geriatric",
"germicide",
"germinate",
"germless",
"germproof",
"gestate",
"gestation",
"gesture",
"getaway",
"getting",
"getup",
"giant",
"gibberish",
"giblet",
"giddily",
"giddiness",
"giddy",
"gift",
"gigabyte",
"gigahertz",
"gigantic",
"giggle",
"giggling",
"giggly",
"gigolo",
"gilled",
"gills",
"gimmick",
"girdle",
"giveaway",
"given",
"giver",
"giving",
"gizmo",
"gizzard",
"glacial",
"glacier",
"glade",
"gladiator",
"gladly",
"glamorous",
"glamour",
"glance",
"glancing",
"glandular",
"glare",
"glaring",
"glass",
"glaucoma",
"glazing",
"gleaming",
"gleeful",
"glider",
"gliding",
"glimmer",
"glimpse",
"glisten",
"glitch",
"glitter",
"glitzy",
"gloater",
"gloating",
"gloomily",
"gloomy",
"glorified",
"glorifier",
"glorify",
"glorious",
"glory",
"gloss",
"glove",
"glowing",
"glowworm",
"glucose",
"glue",
"gluten",
"glutinous",
"glutton",
"gnarly",
"gnat",
"goal",
"goatskin",
"goes",
"goggles",
"going",
"goldfish",
"goldmine",
"goldsmith",
"golf",
"goliath",
"gonad",
"gondola",
"gone",
"gong",
"good",
"gooey",
"goofball",
"goofiness",
"goofy",
"google",
"goon",
"gopher",
"gore",
"gorged",
"gorgeous",
"gory",
"gosling",
"gossip",
"gothic",
"gotten",
"gout",
"gown",
"grab",
"graceful",
"graceless",
"gracious",
"gradation",
"graded",
"grader",
"gradient",
"grading",
"gradually",
"graduate",
"graffiti",
"grafted",
"grafting",
"grain",
"granddad",
"grandkid",
"grandly",
"grandma",
"grandpa",
"grandson",
"granite",
"granny",
"granola",
"grant",
"granular",
"grape",
"graph",
"grapple",
"grappling",
"grasp",
"grass",
"gratified",
"gratify",
"grating",
"gratitude",
"gratuity",
"gravel",
"graveness",
"graves",
"graveyard",
"gravitate",
"gravity",
"gravy",
"gray",
"grazing",
"greasily",
"greedily",
"greedless",
"greedy",
"green",
"greeter",
"greeting",
"grew",
"greyhound",
"grid",
"grief",
"grievance",
"grieving",
"grievous",
"grill",
"grimace",
"grimacing",
"grime",
"griminess",
"grimy",
"grinch",
"grinning",
"grip",
"gristle",
"grit",
"groggily",
"groggy",
"groin",
"groom",
"groove",
"grooving",
"groovy",
"grope",
"ground",
"grouped",
"grout",
"grove",
"grower",
"growing",
"growl",
"grub",
"grudge",
"grudging",
"grueling",
"gruffly",
"grumble",
"grumbling",
"grumbly",
"grumpily",
"grunge",
"grunt",
"guacamole",
"guidable",
"guidance",
"guide",
"guiding",
"guileless",
"guise",
"gulf",
"gullible",
"gully",
"gulp",
"gumball",
"gumdrop",
"gumminess",
"gumming",
"gummy",
"gurgle",
"gurgling",
"guru",
"gush",
"gusto",
"gusty",
"gutless",
"guts",
"gutter",
"guy",
"guzzler",
"gyration",
"habitable",
"habitant",
"habitat",
"habitual",
"hacked",
"hacker",
"hacking",
"hacksaw",
"had",
"haggler",
"haiku",
"half",
"halogen",
"halt",
"halved",
"halves",
"hamburger",
"hamlet",
"hammock",
"hamper",
"hamster",
"hamstring",
"handbag",
"handball",
"handbook",
"handbrake",
"handcart",
"handclap",
"handclasp",
"handcraft",
"handcuff",
"handed",
"handful",
"handgrip",
"handgun",
"handheld",
"handiness",
"handiwork",
"handlebar",
"handled",
"handler",
"handling",
"handmade",
"handoff",
"handpick",
"handprint",
"handrail",
"handsaw",
"handset",
"handsfree",
"handshake",
"handstand",
"handwash",
"handwork",
"handwoven",
"handwrite",
"handyman",
"hangnail",
"hangout",
"hangover",
"hangup",
"hankering",
"hankie",
"hanky",
"haphazard",
"happening",
"happier",
"happiest",
"happily",
"happiness",
"happy",
"harbor",
"hardcopy",
"hardcore",
"hardcover",
"harddisk",
"hardened",
"hardener",
"hardening",
"hardhat",
"hardhead",
"hardiness",
"hardly",
"hardness",
"hardship",
"hardware",
"hardwired",
"hardwood",
"hardy",
"harmful",
"harmless",
"harmonica",
"harmonics",
"harmonize",
"harmony",
"harness",
"harpist",
"harsh",
"harvest",
"hash",
"hassle",
"haste",
"hastily",
"hastiness",
"hasty",
"hatbox",
"hatchback",
"hatchery",
"hatchet",
"hatching",
"hatchling",
"hate",
"hatless",
"hatred",
"haunt",
"haven",
"hazard",
"hazelnut",
"hazily",
"haziness",
"hazing",
"hazy",
"headache",
"headband",
"headboard",
"headcount",
"headdress",
"headed",
"header",
"headfirst",
"headgear",
"heading",
"headlamp",
"headless",
"headlock",
"headphone",
"headpiece",
"headrest",
"headroom",
"headscarf",
"headset",
"headsman",
"headstand",
"headstone",
"headway",
"headwear",
"heap",
"heat",
"heave",
"heavily",
"heaviness",
"heaving",
"hedge",
"hedging",
"heftiness",
"hefty",
"helium",
"helmet",
"helper",
"helpful",
"helping",
"helpless",
"helpline",
"hemlock",
"hemstitch",
"hence",
"henchman",
"henna",
"herald",
"herbal",
"herbicide",
"herbs",
"heritage",
"hermit",
"heroics",
"heroism",
"herring",
"herself",
"hertz",
"hesitancy",
"hesitant",
"hesitate",
"hexagon",
"hexagram",
"hubcap",
"huddle",
"huddling",
"huff",
"hug",
"hula",
"hulk",
"hull",
"human",
"humble",
"humbling",
"humbly",
"humid",
"humiliate",
"humility",
"humming",
"hummus",
"humongous",
"humorist",
"humorless",
"humorous",
"humpback",
"humped",
"humvee",
"hunchback",
"hundredth",
"hunger",
"hungrily",
"hungry",
"hunk",
"hunter",
"hunting",
"huntress",
"huntsman",
"hurdle",
"hurled",
"hurler",
"hurling",
"hurray",
"hurricane",
"hurried",
"hurry",
"hurt",
"husband",
"hush",
"husked",
"huskiness",
"hut",
"hybrid",
"hydrant",
"hydrated",
"hydration",
"hydrogen",
"hydroxide",
"hyperlink",
"hypertext",
"hyphen",
"hypnoses",
"hypnosis",
"hypnotic",
"hypnotism",
"hypnotist",
"hypnotize",
"hypocrisy",
"hypocrite",
"ibuprofen",
"ice",
"iciness",
"icing",
"icky",
"icon",
"icy",
"idealism",
"idealist",
"idealize",
"ideally",
"idealness",
"identical",
"identify",
"identity",
"ideology",
"idiocy",
"idiom",
"idly",
"igloo",
"ignition",
"ignore",
"iguana",
"illicitly",
"illusion",
"illusive",
"image",
"imaginary",
"imagines",
"imaging",
"imbecile",
"imitate",
"imitation",
"immature",
"immerse",
"immersion",
"imminent",
"immobile",
"immodest",
"immorally",
"immortal",
"immovable",
"immovably",
"immunity",
"immunize",
"impaired",
"impale",
"impart",
"impatient",
"impeach",
"impeding",
"impending",
"imperfect",
"imperial",
"impish",
"implant",
"implement",
"implicate",
"implicit",
"implode",
"implosion",
"implosive",
"imply",
"impolite",
"important",
"importer",
"impose",
"imposing",
"impotence",
"impotency",
"impotent",
"impound",
"imprecise",
"imprint",
"imprison",
"impromptu",
"improper",
"improve",
"improving",
"improvise",
"imprudent",
"impulse",
"impulsive",
"impure",
"impurity",
"iodine",
"iodize",
"ion",
"ipad",
"iphone",
"ipod",
"irate",
"irk",
"iron",
"irregular",
"irrigate",
"irritable",
"irritably",
"irritant",
"irritate",
"islamic",
"islamist",
"isolated",
"isolating",
"isolation",
"isotope",
"issue",
"issuing",
"italicize",
"italics",
"item",
"itinerary",
"itunes",
"ivory",
"ivy",
"jab",
"jackal",
"jacket",
"jackknife",
"jackpot",
"jailbird",
"jailbreak",
"jailer",
"jailhouse",
"jalapeno",
"jam",
"janitor",
"january",
"jargon",
"jarring",
"jasmine",
"jaundice",
"jaunt",
"java",
"jawed",
"jawless",
"jawline",
"jaws",
"jaybird",
"jaywalker",
"jazz",
"jeep",
"jeeringly",
"jellied",
"jelly",
"jersey",
"jester",
"jet",
"jiffy",
"jigsaw",
"jimmy",
"jingle",
"jingling",
"jinx",
"jitters",
"jittery",
"job",
"jockey",
"jockstrap",
"jogger",
"jogging",
"john",
"joining",
"jokester",
"jokingly",
"jolliness",
"jolly",
"jolt",
"jot",
"jovial",
"joyfully",
"joylessly",
"joyous",
"joyride",
"joystick",
"jubilance",
"jubilant",
"judge",
"judgingly",
"judicial",
"judiciary",
"judo",
"juggle",
"juggling",
"jugular",
"juice",
"juiciness",
"juicy",
"jujitsu",
"jukebox",
"july",
"jumble",
"jumbo",
"jump",
"junction",
"juncture",
"june",
"junior",
"juniper",
"junkie",
"junkman",
"junkyard",
"jurist",
"juror",
"jury",
"justice",
"justifier",
"justify",
"justly",
"justness",
"juvenile",
"kabob",
"kangaroo",
"karaoke",
"karate",
"karma",
"kebab",
"keenly",
"keenness",
"keep",
"keg",
"kelp",
"kennel",
"kept",
"kerchief",
"kerosene",
"kettle",
"kick",
"kiln",
"kilobyte",
"kilogram",
"kilometer",
"kilowatt",
"kilt",
"kimono",
"kindle",
"kindling",
"kindly",
"kindness",
"kindred",
"kinetic",
"kinfolk",
"king",
"kinship",
"kinsman",
"kinswoman",
"kissable",
"kisser",
"kissing",
"kitchen",
"kite",
"kitten",
"kitty",
"kiwi",
"kleenex",
"knapsack",
"knee",
"knelt",
"knickers",
"knoll",
"koala",
"kooky",
"kosher",
"krypton",
"kudos",
"kung",
"labored",
"laborer",
"laboring",
"laborious",
"labrador",
"ladder",
"ladies",
"ladle",
"ladybug",
"ladylike",
"lagged",
"lagging",
"lagoon",
"lair",
"lake",
"lance",
"landed",
"landfall",
"landfill",
"landing",
"landlady",
"landless",
"landline",
"landlord",
"landmark",
"landmass",
"landmine",
"landowner",
"landscape",
"landside",
"landslide",
"language",
"lankiness",
"lanky",
"lantern",
"lapdog",
"lapel",
"lapped",
"lapping",
"laptop",
"lard",
"large",
"lark",
"lash",
"lasso",
"last",
"latch",
"late",
"lather",
"latitude",
"latrine",
"latter",
"latticed",
"launch",
"launder",
"laundry",
"laurel",
"lavender",
"lavish",
"laxative",
"lazily",
"laziness",
"lazy",
"lecturer",
"left",
"legacy",
"legal",
"legend",
"legged",
"leggings",
"legible",
"legibly",
"legislate",
"lego",
"legroom",
"legume",
"legwarmer",
"legwork",
"lemon",
"lend",
"length",
"lens",
"lent",
"leotard",
"lesser",
"letdown",
"lethargic",
"lethargy",
"letter",
"lettuce",
"level",
"leverage",
"levers",
"levitate",
"levitator",
"liability",
"liable",
"liberty",
"librarian",
"library",
"licking",
"licorice",
"lid",
"life",
"lifter",
"lifting",
"liftoff",
"ligament",
"likely",
"likeness",
"likewise",
"liking",
"lilac",
"lilly",
"lily",
"limb",
"limeade",
"limelight",
"limes",
"limit",
"limping",
"limpness",
"line",
"lingo",
"linguini",
"linguist",
"lining",
"linked",
"linoleum",
"linseed",
"lint",
"lion",
"lip",
"liquefy",
"liqueur",
"liquid",
"lisp",
"list",
"litigate",
"litigator",
"litmus",
"litter",
"little",
"livable",
"lived",
"lively",
"liver",
"livestock",
"lividly",
"living",
"lizard",
"lubricant",
"lubricate",
"lucid",
"luckily",
"luckiness",
"luckless",
"lucrative",
"ludicrous",
"lugged",
"lukewarm",
"lullaby",
"lumber",
"luminance",
"luminous",
"lumpiness",
"lumping",
"lumpish",
"lunacy",
"lunar",
"lunchbox",
"luncheon",
"lunchroom",
"lunchtime",
"lung",
"lurch",
"lure",
"luridness",
"lurk",
"lushly",
"lushness",
"luster",
"lustfully",
"lustily",
"lustiness",
"lustrous",
"lusty",
"luxurious",
"luxury",
"lying",
"lyrically",
"lyricism",
"lyricist",
"lyrics",
"macarena",
"macaroni",
"macaw",
"mace",
"machine",
"machinist",
"magazine",
"magenta",
"maggot",
"magical",
"magician",
"magma",
"magnesium",
"magnetic",
"magnetism",
"magnetize",
"magnifier",
"magnify",
"magnitude",
"magnolia",
"mahogany",
"maimed",
"majestic",
"majesty",
"majorette",
"majority",
"makeover",
"maker",
"makeshift",
"making",
"malformed",
"malt",
"mama",
"mammal",
"mammary",
"mammogram",
"manager",
"managing",
"manatee",
"mandarin",
"mandate",
"mandatory",
"mandolin",
"manger",
"mangle",
"mango",
"mangy",
"manhandle",
"manhole",
"manhood",
"manhunt",
"manicotti",
"manicure",
"manifesto",
"manila",
"mankind",
"manlike",
"manliness",
"manly",
"manmade",
"manned",
"mannish",
"manor",
"manpower",
"mantis",
"mantra",
"manual",
"many",
"map",
"marathon",
"marauding",
"marbled",
"marbles",
"marbling",
"march",
"mardi",
"margarine",
"margarita",
"margin",
"marigold",
"marina",
"marine",
"marital",
"maritime",
"marlin",
"marmalade",
"maroon",
"married",
"marrow",
"marry",
"marshland",
"marshy",
"marsupial",
"marvelous",
"marxism",
"mascot",
"masculine",
"mashed",
"mashing",
"massager",
"masses",
"massive",
"mastiff",
"matador",
"matchbook",
"matchbox",
"matcher",
"matching",
"matchless",
"material",
"maternal",
"maternity",
"math",
"mating",
"matriarch",
"matrimony",
"matrix",
"matron",
"matted",
"matter",
"maturely",
"maturing",
"maturity",
"mauve",
"maverick",
"maximize",
"maximum",
"maybe",
"mayday",
"mayflower",
"moaner",
"moaning",
"mobile",
"mobility",
"mobilize",
"mobster",
"mocha",
"mocker",
"mockup",
"modified",
"modify",
"modular",
"modulator",
"module",
"moisten",
"moistness",
"moisture",
"molar",
"molasses",
"mold",
"molecular",
"molecule",
"molehill",
"mollusk",
"mom",
"monastery",
"monday",
"monetary",
"monetize",
"moneybags",
"moneyless",
"moneywise",
"mongoose",
"mongrel",
"monitor",
"monkhood",
"monogamy",
"monogram",
"monologue",
"monopoly",
"monorail",
"monotone",
"monotype",
"monoxide",
"monsieur",
"monsoon",
"monstrous",
"monthly",
"monument",
"moocher",
"moodiness",
"moody",
"mooing",
"moonbeam",
"mooned",
"moonlight",
"moonlike",
"moonlit",
"moonrise",
"moonscape",
"moonshine",
"moonstone",
"moonwalk",
"mop",
"morale",
"morality",
"morally",
"morbidity",
"morbidly",
"morphine",
"morphing",
"morse",
"mortality",
"mortally",
"mortician",
"mortified",
"mortify",
"mortuary",
"mosaic",
"mossy",
"most",
"mothball",
"mothproof",
"motion",
"motivate",
"motivator",
"motive",
"motocross",
"motor",
"motto",
"mountable",
"mountain",
"mounted",
"mounting",
"mourner",
"mournful",
"mouse",
"mousiness",
"moustache",
"mousy",
"mouth",
"movable",
"move",
"movie",
"moving",
"mower",
"mowing",
"much",
"muck",
"mud",
"mug",
"mulberry",
"mulch",
"mule",
"mulled",
"mullets",
"multiple",
"multiply",
"multitask",
"multitude",
"mumble",
"mumbling",
"mumbo",
"mummified",
"mummify",
"mummy",
"mumps",
"munchkin",
"mundane",
"municipal",
"muppet",
"mural",
"murkiness",
"murky",
"murmuring",
"muscular",
"museum",
"mushily",
"mushiness",
"mushroom",
"mushy",
"music",
"musket",
"muskiness",
"musky",
"mustang",
"mustard",
"muster",
"mustiness",
"musty",
"mutable",
"mutate",
"mutation",
"mute",
"mutilated",
"mutilator",
"mutiny",
"mutt",
"mutual",
"muzzle",
"myself",
"myspace",
"mystified",
"mystify",
"myth",
"nacho",
"nag",
"nail",
"name",
"naming",
"nanny",
"nanometer",
"nape",
"napkin",
"napped",
"napping",
"nappy",
"narrow",
"nastily",
"nastiness",
"national",
"native",
"nativity",
"natural",
"nature",
"naturist",
"nautical",
"navigate",
"navigator",
"navy",
"nearby",
"nearest",
"nearly",
"nearness",
"neatly",
"neatness",
"nebula",
"nebulizer",
"nectar",
"negate",
"negation",
"negative",
"neglector",
"negligee",
"negligent",
"negotiate",
"nemeses",
"nemesis",
"neon",
"nephew",
"nerd",
"nervous",
"nervy",
"nest",
"net",
"neurology",
"neuron",
"neurosis",
"neurotic",
"neuter",
"neutron",
"never",
"next",
"nibble",
"nickname",
"nicotine",
"niece",
"nifty",
"nimble",
"nimbly",
"nineteen",
"ninetieth",
"ninja",
"nintendo",
"ninth",
"nuclear",
"nuclei",
"nucleus",
"nugget",
"nullify",
"number",
"numbing",
"numbly",
"numbness",
"numeral",
"numerate",
"numerator",
"numeric",
"numerous",
"nuptials",
"nursery",
"nursing",
"nurture",
"nutcase",
"nutlike",
"nutmeg",
"nutrient",
"nutshell",
"nuttiness",
"nutty",
"nuzzle",
"nylon",
"oaf",
"oak",
"oasis",
"oat",
"obedience",
"obedient",
"obituary",
"object",
"obligate",
"obliged",
"oblivion",
"oblivious",
"oblong",
"obnoxious",
"oboe",
"obscure",
"obscurity",
"observant",
"observer",
"observing",
"obsessed",
"obsession",
"obsessive",
"obsolete",
"obstacle",
"obstinate",
"obstruct",
"obtain",
"obtrusive",
"obtuse",
"obvious",
"occultist",
"occupancy",
"occupant",
"occupier",
"occupy",
"ocean",
"ocelot",
"octagon",
"octane",
"october",
"octopus",
"ogle",
"oil",
"oink",
"ointment",
"okay",
"old",
"olive",
"olympics",
"omega",
"omen",
"ominous",
"omission",
"omit",
"omnivore",
"onboard",
"oncoming",
"ongoing",
"onion",
"online",
"onlooker",
"only",
"onscreen",
"onset",
"onshore",
"onslaught",
"onstage",
"onto",
"onward",
"onyx",
"oops",
"ooze",
"oozy",
"opacity",
"opal",
"open",
"operable",
"operate",
"operating",
"operation",
"operative",
"operator",
"opium",
"opossum",
"opponent",
"oppose",
"opposing",
"opposite",
"oppressed",
"oppressor",
"opt",
"opulently",
"osmosis",
"other",
"otter",
"ouch",
"ought",
"ounce",
"outage",
"outback",
"outbid",
"outboard",
"outbound",
"outbreak",
"outburst",
"outcast",
"outclass",
"outcome",
"outdated",
"outdoors",
"outer",
"outfield",
"outfit",
"outflank",
"outgoing",
"outgrow",
"outhouse",
"outing",
"outlast",
"outlet",
"outline",
"outlook",
"outlying",
"outmatch",
"outmost",
"outnumber",
"outplayed",
"outpost",
"outpour",
"output",
"outrage",
"outrank",
"outreach",
"outright",
"outscore",
"outsell",
"outshine",
"outshoot",
"outsider",
"outskirts",
"outsmart",
"outsource",
"outspoken",
"outtakes",
"outthink",
"outward",
"outweigh",
"outwit",
"oval",
"ovary",
"oven",
"overact",
"overall",
"overarch",
"overbid",
"overbill",
"overbite",
"overblown",
"overboard",
"overbook",
"overbuilt",
"overcast",
"overcoat",
"overcome",
"overcook",
"overcrowd",
"overdraft",
"overdrawn",
"overdress",
"overdrive",
"overdue",
"overeager",
"overeater",
"overexert",
"overfed",
"overfeed",
"overfill",
"overflow",
"overfull",
"overgrown",
"overhand",
"overhang",
"overhaul",
"overhead",
"overhear",
"overheat",
"overhung",
"overjoyed",
"overkill",
"overlabor",
"overlaid",
"overlap",
"overlay",
"overload",
"overlook",
"overlord",
"overlying",
"overnight",
"overpass",
"overpay",
"overplant",
"overplay",
"overpower",
"overprice",
"overrate",
"overreach",
"overreact",
"override",
"overripe",
"overrule",
"overrun",
"overshoot",
"overshot",
"oversight",
"oversized",
"oversleep",
"oversold",
"overspend",
"overstate",
"overstay",
"overstep",
"overstock",
"overstuff",
"oversweet",
"overtake",
"overthrow",
"overtime",
"overtly",
"overtone",
"overture",
"overturn",
"overuse",
"overvalue",
"overview",
"overwrite",
"owl",
"oxford",
"oxidant",
"oxidation",
"oxidize",
"oxidizing",
"oxygen",
"oxymoron",
"oyster",
"ozone",
"paced",
"pacemaker",
"pacific",
"pacifier",
"pacifism",
"pacifist",
"pacify",
"padded",
"padding",
"paddle",
"paddling",
"padlock",
"pagan",
"pager",
"paging",
"pajamas",
"palace",
"palatable",
"palm",
"palpable",
"palpitate",
"paltry",
"pampered",
"pamperer",
"pampers",
"pamphlet",
"panama",
"pancake",
"pancreas",
"panda",
"pandemic",
"pang",
"panhandle",
"panic",
"panning",
"panorama",
"panoramic",
"panther",
"pantomime",
"pantry",
"pants",
"pantyhose",
"paparazzi",
"papaya",
"paper",
"paprika",
"papyrus",
"parabola",
"parachute",
"parade",
"paradox",
"paragraph",
"parakeet",
"paralegal",
"paralyses",
"paralysis",
"paralyze",
"paramedic",
"parameter",
"paramount",
"parasail",
"parasite",
"parasitic",
"parcel",
"parched",
"parchment",
"pardon",
"parish",
"parka",
"parking",
"parkway",
"parlor",
"parmesan",
"parole",
"parrot",
"parsley",
"parsnip",
"partake",
"parted",
"parting",
"partition",
"partly",
"partner",
"partridge",
"party",
"passable",
"passably",
"passage",
"passcode",
"passenger",
"passerby",
"passing",
"passion",
"passive",
"passivism",
"passover",
"passport",
"password",
"pasta",
"pasted",
"pastel",
"pastime",
"pastor",
"pastrami",
"pasture",
"pasty",
"patchwork",
"patchy",
"paternal",
"paternity",
"path",
"patience",
"patient",
"patio",
"patriarch",
"patriot",
"patrol",
"patronage",
"patronize",
"pauper",
"pavement",
"paver",
"pavestone",
"pavilion",
"paving",
"pawing",
"payable",
"payback",
"paycheck",
"payday",
"payee",
"payer",
"paying",
"payment",
"payphone",
"payroll",
"pebble",
"pebbly",
"pecan",
"pectin",
"peculiar",
"peddling",
"pediatric",
"pedicure",
"pedigree",
"pedometer",
"pegboard",
"pelican",
"pellet",
"pelt",
"pelvis",
"penalize",
"penalty",
"pencil",
"pendant",
"pending",
"penholder",
"penknife",
"pennant",
"penniless",
"penny",
"penpal",
"pension",
"pentagon",
"pentagram",
"pep",
"perceive",
"percent",
"perch",
"percolate",
"perennial",
"perfected",
"perfectly",
"perfume",
"periscope",
"perish",
"perjurer",
"perjury",
"perkiness",
"perky",
"perm",
"peroxide",
"perpetual",
"perplexed",
"persecute",
"persevere",
"persuaded",
"persuader",
"pesky",
"peso",
"pessimism",
"pessimist",
"pester",
"pesticide",
"petal",
"petite",
"petition",
"petri",
"petroleum",
"petted",
"petticoat",
"pettiness",
"petty",
"petunia",
"phantom",
"phobia",
"phoenix",
"phonebook",
"phoney",
"phonics",
"phoniness",
"phony",
"phosphate",
"photo",
"phrase",
"phrasing",
"placard",
"placate",
"placidly",
"plank",
"planner",
"plant",
"plasma",
"plaster",
"plastic",
"plated",
"platform",
"plating",
"platinum",
"platonic",
"platter",
"platypus",
"plausible",
"plausibly",
"playable",
"playback",
"player",
"playful",
"playgroup",
"playhouse",
"playing",
"playlist",
"playmaker",
"playmate",
"playoff",
"playpen",
"playroom",
"playset",
"plaything",
"playtime",
"plaza",
"pleading",
"pleat",
"pledge",
"plentiful",
"plenty",
"plethora",
"plexiglas",
"pliable",
"plod",
"plop",
"plot",
"plow",
"ploy",
"pluck",
"plug",
"plunder",
"plunging",
"plural",
"plus",
"plutonium",
"plywood",
"poach",
"pod",
"poem",
"poet",
"pogo",
"pointed",
"pointer",
"pointing",
"pointless",
"pointy",
"poise",
"poison",
"poker",
"poking",
"polar",
"police",
"policy",
"polio",
"polish",
"politely",
"polka",
"polo",
"polyester",
"polygon",
"polygraph",
"polymer",
"poncho",
"pond",
"pony",
"popcorn",
"pope",
"poplar",
"popper",
"poppy",
"popsicle",
"populace",
"popular",
"populate",
"porcupine",
"pork",
"porous",
"porridge",
"portable",
"portal",
"portfolio",
"porthole",
"portion",
"portly",
"portside",
"poser",
"posh",
"posing",
"possible",
"possibly",
"possum",
"postage",
"postal",
"postbox",
"postcard",
"posted",
"poster",
"posting",
"postnasal",
"posture",
"postwar",
"pouch",
"pounce",
"pouncing",
"pound",
"pouring",
"pout",
"powdered",
"powdering",
"powdery",
"power",
"powwow",
"pox",
"praising",
"prance",
"prancing",
"pranker",
"prankish",
"prankster",
"prayer",
"praying",
"preacher",
"preaching",
"preachy",
"preamble",
"precinct",
"precise",
"precision",
"precook",
"precut",
"predator",
"predefine",
"predict",
"preface",
"prefix",
"preflight",
"preformed",
"pregame",
"pregnancy",
"pregnant",
"preheated",
"prelaunch",
"prelaw",
"prelude",
"premiere",
"premises",
"premium",
"prenatal",
"preoccupy",
"preorder",
"prepaid",
"prepay",
"preplan",
"preppy",
"preschool",
"prescribe",
"preseason",
"preset",
"preshow",
"president",
"presoak",
"press",
"presume",
"presuming",
"preteen",
"pretended",
"pretender",
"pretense",
"pretext",
"pretty",
"pretzel",
"prevail",
"prevalent",
"prevent",
"preview",
"previous",
"prewar",
"prewashed",
"prideful",
"pried",
"primal",
"primarily",
"primary",
"primate",
"primer",
"primp",
"princess",
"print",
"prior",
"prism",
"prison",
"prissy",
"pristine",
"privacy",
"private",
"privatize",
"prize",
"proactive",
"probable",
"probably",
"probation",
"probe",
"probing",
"probiotic",
"problem",
"procedure",
"process",
"proclaim",
"procreate",
"procurer",
"prodigal",
"prodigy",
"produce",
"product",
"profane",
"profanity",
"professed",
"professor",
"profile",
"profound",
"profusely",
"progeny",
"prognosis",
"program",
"progress",
"projector",
"prologue",
"prolonged",
"promenade",
"prominent",
"promoter",
"promotion",
"prompter",
"promptly",
"prone",
"prong",
"pronounce",
"pronto",
"proofing",
"proofread",
"proofs",
"propeller",
"properly",
"property",
"proponent",
"proposal",
"propose",
"props",
"prorate",
"protector",
"protegee",
"proton",
"prototype",
"protozoan",
"protract",
"protrude",
"proud",
"provable",
"proved",
"proven",
"provided",
"provider",
"providing",
"province",
"proving",
"provoke",
"provoking",
"provolone",
"prowess",
"prowler",
"prowling",
"proximity",
"proxy",
"prozac",
"prude",
"prudishly",
"prune",
"pruning",
"pry",
"psychic",
"public",
"publisher",
"pucker",
"pueblo",
"pug",
"pull",
"pulmonary",
"pulp",
"pulsate",
"pulse",
"pulverize",
"puma",
"pumice",
"pummel",
"punch",
"punctual",
"punctuate",
"punctured",
"pungent",
"punisher",
"punk",
"pupil",
"puppet",
"puppy",
"purchase",
"pureblood",
"purebred",
"purely",
"pureness",
"purgatory",
"purge",
"purging",
"purifier",
"purify",
"purist",
"puritan",
"purity",
"purple",
"purplish",
"purposely",
"purr",
"purse",
"pursuable",
"pursuant",
"pursuit",
"purveyor",
"pushcart",
"pushchair",
"pusher",
"pushiness",
"pushing",
"pushover",
"pushpin",
"pushup",
"pushy",
"putdown",
"putt",
"puzzle",
"puzzling",
"pyramid",
"pyromania",
"python",
"quack",
"quadrant",
"quail",
"quaintly",
"quake",
"quaking",
"qualified",
"qualifier",
"qualify",
"quality",
"qualm",
"quantum",
"quarrel",
"quarry",
"quartered",
"quarterly",
"quarters",
"quartet",
"quench",
"query",
"quicken",
"quickly",
"quickness",
"quicksand",
"quickstep",
"quiet",
"quill",
"quilt",
"quintet",
"quintuple",
"quirk",
"quit",
"quiver",
"quizzical",
"quotable",
"quotation",
"quote",
"rabid",
"race",
"racing",
"racism",
"rack",
"racoon",
"radar",
"radial",
"radiance",
"radiantly",
"radiated",
"radiation",
"radiator",
"radio",
"radish",
"raffle",
"raft",
"rage",
"ragged",
"raging",
"ragweed",
"raider",
"railcar",
"railing",
"railroad",
"railway",
"raisin",
"rake",
"raking",
"rally",
"ramble",
"rambling",
"ramp",
"ramrod",
"ranch",
"rancidity",
"random",
"ranged",
"ranger",
"ranging",
"ranked",
"ranking",
"ransack",
"ranting",
"rants",
"rare",
"rarity",
"rascal",
"rash",
"rasping",
"ravage",
"raven",
"ravine",
"raving",
"ravioli",
"ravishing",
"reabsorb",
"reach",
"reacquire",
"reaction",
"reactive",
"reactor",
"reaffirm",
"ream",
"reanalyze",
"reappear",
"reapply",
"reappoint",
"reapprove",
"rearrange",
"rearview",
"reason",
"reassign",
"reassure",
"reattach",
"reawake",
"rebalance",
"rebate",
"rebel",
"rebirth",
"reboot",
"reborn",
"rebound",
"rebuff",
"rebuild",
"rebuilt",
"reburial",
"rebuttal",
"recall",
"recant",
"recapture",
"recast",
"recede",
"recent",
"recess",
"recharger",
"recipient",
"recital",
"recite",
"reckless",
"reclaim",
"recliner",
"reclining",
"recluse",
"reclusive",
"recognize",
"recoil",
"recollect",
"recolor",
"reconcile",
"reconfirm",
"reconvene",
"recopy",
"record",
"recount",
"recoup",
"recovery",
"recreate",
"rectal",
"rectangle",
"rectified",
"rectify",
"recycled",
"recycler",
"recycling",
"reemerge",
"reenact",
"reenter",
"reentry",
"reexamine",
"referable",
"referee",
"reference",
"refill",
"refinance",
"refined",
"refinery",
"refining",
"refinish",
"reflected",
"reflector",
"reflex",
"reflux",
"refocus",
"refold",
"reforest",
"reformat",
"reformed",
"reformer",
"reformist",
"refract",
"refrain",
"refreeze",
"refresh",
"refried",
"refueling",
"refund",
"refurbish",
"refurnish",
"refusal",
"refuse",
"refusing",
"refutable",
"refute",
"regain",
"regalia",
"regally",
"reggae",
"regime",
"region",
"register",
"registrar",
"registry",
"regress",
"regretful",
"regroup",
"regular",
"regulate",
"regulator",
"rehab",
"reheat",
"rehire",
"rehydrate",
"reimburse",
"reissue",
"reiterate",
"rejoice",
"rejoicing",
"rejoin",
"rekindle",
"relapse",
"relapsing",
"relatable",
"related",
"relation",
"relative",
"relax",
"relay",
"relearn",
"release",
"relenting",
"reliable",
"reliably",
"reliance",
"reliant",
"relic",
"relieve",
"relieving",
"relight",
"relish",
"relive",
"reload",
"relocate",
"relock",
"reluctant",
"rely",
"remake",
"remark",
"remarry",
"rematch",
"remedial",
"remedy",
"remember",
"reminder",
"remindful",
"remission",
"remix",
"remnant",
"remodeler",
"remold",
"remorse",
"remote",
"removable",
"removal",
"removed",
"remover",
"removing",
"rename",
"renderer",
"rendering",
"rendition",
"renegade",
"renewable",
"renewably",
"renewal",
"renewed",
"renounce",
"renovate",
"renovator",
"rentable",
"rental",
"rented",
"renter",
"reoccupy",
"reoccur",
"reopen",
"reorder",
"repackage",
"repacking",
"repaint",
"repair",
"repave",
"repaying",
"repayment",
"repeal",
"repeated",
"repeater",
"repent",
"rephrase",
"replace",
"replay",
"replica",
"reply",
"reporter",
"repose",
"repossess",
"repost",
"repressed",
"reprimand",
"reprint",
"reprise",
"reproach",
"reprocess",
"reproduce",
"reprogram",
"reps",
"reptile",
"reptilian",
"repugnant",
"repulsion",
"repulsive",
"repurpose",
"reputable",
"reputably",
"request",
"require",
"requisite",
"reroute",
"rerun",
"resale",
"resample",
"rescuer",
"reseal",
"research",
"reselect",
"reseller",
"resemble",
"resend",
"resent",
"reset",
"reshape",
"reshoot",
"reshuffle",
"residence",
"residency",
"resident",
"residual",
"residue",
"resigned",
"resilient",
"resistant",
"resisting",
"resize",
"resolute",
"resolved",
"resonant",
"resonate",
"resort",
"resource",
"respect",
"resubmit",
"result",
"resume",
"resupply",
"resurface",
"resurrect",
"retail",
"retainer",
"retaining",
"retake",
"retaliate",
"retention",
"rethink",
"retinal",
"retired",
"retiree",
"retiring",
"retold",
"retool",
"retorted",
"retouch",
"retrace",
"retract",
"retrain",
"retread",
"retreat",
"retrial",
"retrieval",
"retriever",
"retry",
"return",
"retying",
"retype",
"reunion",
"reunite",
"reusable",
"reuse",
"reveal",
"reveler",
"revenge",
"revenue",
"reverb",
"revered",
"reverence",
"reverend",
"reversal",
"reverse",
"reversing",
"reversion",
"revert",
"revisable",
"revise",
"revision",
"revisit",
"revivable",
"revival",
"reviver",
"reviving",
"revocable",
"revoke",
"revolt",
"revolver",
"revolving",
"reward",
"rewash",
"rewind",
"rewire",
"reword",
"rework",
"rewrap",
"rewrite",
"rhyme",
"ribbon",
"ribcage",
"rice",
"riches",
"richly",
"richness",
"rickety",
"ricotta",
"riddance",
"ridden",
"ride",
"riding",
"rifling",
"rift",
"rigging",
"rigid",
"rigor",
"rimless",
"rimmed",
"rind",
"rink",
"rinse",
"rinsing",
"riot",
"ripcord",
"ripeness",
"ripening",
"ripping",
"ripple",
"rippling",
"riptide",
"rise",
"rising",
"risk",
"risotto",
"ritalin",
"ritzy",
"rival",
"riverbank",
"riverbed",
"riverboat",
"riverside",
"riveter",
"riveting",
"roamer",
"roaming",
"roast",
"robbing",
"robe",
"robin",
"robotics",
"robust",
"rockband",
"rocker",
"rocket",
"rockfish",
"rockiness",
"rocking",
"rocklike",
"rockslide",
"rockstar",
"rocky",
"rogue",
"roman",
"romp",
"rope",
"roping",
"roster",
"rosy",
"rotten",
"rotting",
"rotunda",
"roulette",
"rounding",
"roundish",
"roundness",
"roundup",
"roundworm",
"routine",
"routing",
"rover",
"roving",
"royal",
"rubbed",
"rubber",
"rubbing",
"rubble",
"rubdown",
"ruby",
"ruckus",
"rudder",
"rug",
"ruined",
"rule",
"rumble",
"rumbling",
"rummage",
"rumor",
"runaround",
"rundown",
"runner",
"running",
"runny",
"runt",
"runway",
"rupture",
"rural",
"ruse",
"rush",
"rust",
"rut",
"sabbath",
"sabotage",
"sacrament",
"sacred",
"sacrifice",
"sadden",
"saddlebag",
"saddled",
"saddling",
"sadly",
"sadness",
"safari",
"safeguard",
"safehouse",
"safely",
"safeness",
"saffron",
"saga",
"sage",
"sagging",
"saggy",
"said",
"saint",
"sake",
"salad",
"salami",
"salaried",
"salary",
"saline",
"salon",
"saloon",
"salsa",
"salt",
"salutary",
"salute",
"salvage",
"salvaging",
"salvation",
"same",
"sample",
"sampling",
"sanction",
"sanctity",
"sanctuary",
"sandal",
"sandbag",
"sandbank",
"sandbar",
"sandblast",
"sandbox",
"sanded",
"sandfish",
"sanding",
"sandlot",
"sandpaper",
"sandpit",
"sandstone",
"sandstorm",
"sandworm",
"sandy",
"sanitary",
"sanitizer",
"sank",
"santa",
"sapling",
"sappiness",
"sappy",
"sarcasm",
"sarcastic",
"sardine",
"sash",
"sasquatch",
"sassy",
"satchel",
"satiable",
"satin",
"satirical",
"satisfied",
"satisfy",
"saturate",
"saturday",
"sauciness",
"saucy",
"sauna",
"savage",
"savanna",
"saved",
"savings",
"savior",
"savor",
"saxophone",
"say",
"scabbed",
"scabby",
"scalded",
"scalding",
"scale",
"scaling",
"scallion",
"scallop",
"scalping",
"scam",
"scandal",
"scanner",
"scanning",
"scant",
"scapegoat",
"scarce",
"scarcity",
"scarecrow",
"scared",
"scarf",
"scarily",
"scariness",
"scarring",
"scary",
"scavenger",
"scenic",
"schedule",
"schematic",
"scheme",
"scheming",
"schilling",
"schnapps",
"scholar",
"science",
"scientist",
"scion",
"scoff",
"scolding",
"scone",
"scoop",
"scooter",
"scope",
"scorch",
"scorebook",
"scorecard",
"scored",
"scoreless",
"scorer",
"scoring",
"scorn",
"scorpion",
"scotch",
"scoundrel",
"scoured",
"scouring",
"scouting",
"scouts",
"scowling",
"scrabble",
"scraggly",
"scrambled",
"scrambler",
"scrap",
"scratch",
"scrawny",
"screen",
"scribble",
"scribe",
"scribing",
"scrimmage",
"script",
"scroll",
"scrooge",
"scrounger",
"scrubbed",
"scrubber",
"scruffy",
"scrunch",
"scrutiny",
"scuba",
"scuff",
"sculptor",
"sculpture",
"scurvy",
"scuttle",
"secluded",
"secluding",
"seclusion",
"second",
"secrecy",
"secret",
"sectional",
"sector",
"secular",
"securely",
"security",
"sedan",
"sedate",
"sedation",
"sedative",
"sediment",
"seduce",
"seducing",
"segment",
"seismic",
"seizing",
"seldom",
"selected",
"selection",
"selective",
"selector",
"self",
"seltzer",
"semantic",
"semester",
"semicolon",
"semifinal",
"seminar",
"semisoft",
"semisweet",
"senate",
"senator",
"send",
"senior",
"senorita",
"sensation",
"sensitive",
"sensitize",
"sensually",
"sensuous",
"sepia",
"september",
"septic",
"septum",
"sequel",
"sequence",
"sequester",
"series",
"sermon",
"serotonin",
"serpent",
"serrated",
"serve",
"service",
"serving",
"sesame",
"sessions",
"setback",
"setting",
"settle",
"settling",
"setup",
"sevenfold",
"seventeen",
"seventh",
"seventy",
"severity",
"shabby",
"shack",
"shaded",
"shadily",
"shadiness",
"shading",
"shadow",
"shady",
"shaft",
"shakable",
"shakily",
"shakiness",
"shaking",
"shaky",
"shale",
"shallot",
"shallow",
"shame",
"shampoo",
"shamrock",
"shank",
"shanty",
"shape",
"shaping",
"share",
"sharpener",
"sharper",
"sharpie",
"sharply",
"sharpness",
"shawl",
"sheath",
"shed",
"sheep",
"sheet",
"shelf",
"shell",
"shelter",
"shelve",
"shelving",
"sherry",
"shield",
"shifter",
"shifting",
"shiftless",
"shifty",
"shimmer",
"shimmy",
"shindig",
"shine",
"shingle",
"shininess",
"shining",
"shiny",
"ship",
"shirt",
"shivering",
"shock",
"shone",
"shoplift",
"shopper",
"shopping",
"shoptalk",
"shore",
"shortage",
"shortcake",
"shortcut",
"shorten",
"shorter",
"shorthand",
"shortlist",
"shortly",
"shortness",
"shorts",
"shortwave",
"shorty",
"shout",
"shove",
"showbiz",
"showcase",
"showdown",
"shower",
"showgirl",
"showing",
"showman",
"shown",
"showoff",
"showpiece",
"showplace",
"showroom",
"showy",
"shrank",
"shrapnel",
"shredder",
"shredding",
"shrewdly",
"shriek",
"shrill",
"shrimp",
"shrine",
"shrink",
"shrivel",
"shrouded",
"shrubbery",
"shrubs",
"shrug",
"shrunk",
"shucking",
"shudder",
"shuffle",
"shuffling",
"shun",
"shush",
"shut",
"shy",
"siamese",
"siberian",
"sibling",
"siding",
"sierra",
"siesta",
"sift",
"sighing",
"silenced",
"silencer",
"silent",
"silica",
"silicon",
"silk",
"silliness",
"silly",
"silo",
"silt",
"silver",
"similarly",
"simile",
"simmering",
"simple",
"simplify",
"simply",
"sincere",
"sincerity",
"singer",
"singing",
"single",
"singular",
"sinister",
"sinless",
"sinner",
"sinuous",
"sip",
"siren",
"sister",
"sitcom",
"sitter",
"sitting",
"situated",
"situation",
"sixfold",
"sixteen",
"sixth",
"sixties",
"sixtieth",
"sixtyfold",
"sizable",
"sizably",
"size",
"sizing",
"sizzle",
"sizzling",
"skater",
"skating",
"skedaddle",
"skeletal",
"skeleton",
"skeptic",
"sketch",
"skewed",
"skewer",
"skid",
"skied",
"skier",
"skies",
"skiing",
"skilled",
"skillet",
"skillful",
"skimmed",
"skimmer",
"skimming",
"skimpily",
"skincare",
"skinhead",
"skinless",
"skinning",
"skinny",
"skintight",
"skipper",
"skipping",
"skirmish",
"skirt",
"skittle",
"skydiver",
"skylight",
"skyline",
"skype",
"skyrocket",
"skyward",
"slab",
"slacked",
"slacker",
"slacking",
"slackness",
"slacks",
"slain",
"slam",
"slander",
"slang",
"slapping",
"slapstick",
"slashed",
"slashing",
"slate",
"slather",
"slaw",
"sled",
"sleek",
"sleep",
"sleet",
"sleeve",
"slept",
"sliceable",
"sliced",
"slicer",
"slicing",
"slick",
"slider",
"slideshow",
"sliding",
"slighted",
"slighting",
"slightly",
"slimness",
"slimy",
"slinging",
"slingshot",
"slinky",
"slip",
"slit",
"sliver",
"slobbery",
"slogan",
"sloped",
"sloping",
"sloppily",
"sloppy",
"slot",
"slouching",
"slouchy",
"sludge",
"slug",
"slum",
"slurp",
"slush",
"sly",
"small",
"smartly",
"smartness",
"smasher",
"smashing",
"smashup",
"smell",
"smelting",
"smile",
"smilingly",
"smirk",
"smite",
"smith",
"smitten",
"smock",
"smog",
"smoked",
"smokeless",
"smokiness",
"smoking",
"smoky",
"smolder",
"smooth",
"smother",
"smudge",
"smudgy",
"smuggler",
"smuggling",
"smugly",
"smugness",
"snack",
"snagged",
"snaking",
"snap",
"snare",
"snarl",
"snazzy",
"sneak",
"sneer",
"sneeze",
"sneezing",
"snide",
"sniff",
"snippet",
"snipping",
"snitch",
"snooper",
"snooze",
"snore",
"snoring",
"snorkel",
"snort",
"snout",
"snowbird",
"snowboard",
"snowbound",
"snowcap",
"snowdrift",
"snowdrop",
"snowfall",
"snowfield",
"snowflake",
"snowiness",
"snowless",
"snowman",
"snowplow",
"snowshoe",
"snowstorm",
"snowsuit",
"snowy",
"snub",
"snuff",
"snuggle",
"snugly",
"snugness",
"speak",
"spearfish",
"spearhead",
"spearman",
"spearmint",
"species",
"specimen",
"specked",
"speckled",
"specks",
"spectacle",
"spectator",
"spectrum",
"speculate",
"speech",
"speed",
"spellbind",
"speller",
"spelling",
"spendable",
"spender",
"spending",
"spent",
"spew",
"sphere",
"spherical",
"sphinx",
"spider",
"spied",
"spiffy",
"spill",
"spilt",
"spinach",
"spinal",
"spindle",
"spinner",
"spinning",
"spinout",
"spinster",
"spiny",
"spiral",
"spirited",
"spiritism",
"spirits",
"spiritual",
"splashed",
"splashing",
"splashy",
"splatter",
"spleen",
"splendid",
"splendor",
"splice",
"splicing",
"splinter",
"splotchy",
"splurge",
"spoilage",
"spoiled",
"spoiler",
"spoiling",
"spoils",
"spoken",
"spokesman",
"sponge",
"spongy",
"sponsor",
"spoof",
"spookily",
"spooky",
"spool",
"spoon",
"spore",
"sporting",
"sports",
"sporty",
"spotless",
"spotlight",
"spotted",
"spotter",
"spotting",
"spotty",
"spousal",
"spouse",
"spout",
"sprain",
"sprang",
"sprawl",
"spray",
"spree",
"sprig",
"spring",
"sprinkled",
"sprinkler",
"sprint",
"sprite",
"sprout",
"spruce",
"sprung",
"spry",
"spud",
"spur",
"sputter",
"spyglass",
"squabble",
"squad",
"squall",
"squander",
"squash",
"squatted",
"squatter",
"squatting",
"squeak",
"squealer",
"squealing",
"squeamish",
"squeegee",
"squeeze",
"squeezing",
"squid",
"squiggle",
"squiggly",
"squint",
"squire",
"squirt",
"squishier",
"squishy",
"stability",
"stabilize",
"stable",
"stack",
"stadium",
"staff",
"stage",
"staging",
"stagnant",
"stagnate",
"stainable",
"stained",
"staining",
"stainless",
"stalemate",
"staleness",
"stalling",
"stallion",
"stamina",
"stammer",
"stamp",
"stand",
"stank",
"staple",
"stapling",
"starboard",
"starch",
"stardom",
"stardust",
"starfish",
"stargazer",
"staring",
"stark",
"starless",
"starlet",
"starlight",
"starlit",
"starring",
"starry",
"starship",
"starter",
"starting",
"startle",
"startling",
"startup",
"starved",
"starving",
"stash",
"state",
"static",
"statistic",
"statue",
"stature",
"status",
"statute",
"statutory",
"staunch",
"stays",
"steadfast",
"steadier",
"steadily",
"steadying",
"steam",
"steed",
"steep",
"steerable",
"steering",
"steersman",
"stegosaur",
"stellar",
"stem",
"stench",
"stencil",
"step",
"stereo",
"sterile",
"sterility",
"sterilize",
"sterling",
"sternness",
"sternum",
"stew",
"stick",
"stiffen",
"stiffly",
"stiffness",
"stifle",
"stifling",
"stillness",
"stilt",
"stimulant",
"stimulate",
"stimuli",
"stimulus",
"stinger",
"stingily",
"stinging",
"stingray",
"stingy",
"stinking",
"stinky",
"stipend",
"stipulate",
"stir",
"stitch",
"stock",
"stoic",
"stoke",
"stole",
"stomp",
"stonewall",
"stoneware",
"stonework",
"stoning",
"stony",
"stood",
"stooge",
"stool",
"stoop",
"stoplight",
"stoppable",
"stoppage",
"stopped",
"stopper",
"stopping",
"stopwatch",
"storable",
"storage",
"storeroom",
"storewide",
"storm",
"stout",
"stove",
"stowaway",
"stowing",
"straddle",
"straggler",
"strained",
"strainer",
"straining",
"strangely",
"stranger",
"strangle",
"strategic",
"strategy",
"stratus",
"straw",
"stray",
"streak",
"stream",
"street",
"strength",
"strenuous",
"strep",
"stress",
"stretch",
"strewn",
"stricken",
"strict",
"stride",
"strife",
"strike",
"striking",
"strive",
"striving",
"strobe",
"strode",
"stroller",
"strongbox",
"strongly",
"strongman",
"struck",
"structure",
"strudel",
"struggle",
"strum",
"strung",
"strut",
"stubbed",
"stubble",
"stubbly",
"stubborn",
"stucco",
"stuck",
"student",
"studied",
"studio",
"study",
"stuffed",
"stuffing",
"stuffy",
"stumble",
"stumbling",
"stump",
"stung",
"stunned",
"stunner",
"stunning",
"stunt",
"stupor",
"sturdily",
"sturdy",
"styling",
"stylishly",
"stylist",
"stylized",
"stylus",
"suave",
"subarctic",
"subatomic",
"subdivide",
"subdued",
"subduing",
"subfloor",
"subgroup",
"subheader",
"subject",
"sublease",
"sublet",
"sublevel",
"sublime",
"submarine",
"submerge",
"submersed",
"submitter",
"subpanel",
"subpar",
"subplot",
"subprime",
"subscribe",
"subscript",
"subsector",
"subside",
"subsiding",
"subsidize",
"subsidy",
"subsoil",
"subsonic",
"substance",
"subsystem",
"subtext",
"subtitle",
"subtly",
"subtotal",
"subtract",
"subtype",
"suburb",
"subway",
"subwoofer",
"subzero",
"succulent",
"such",
"suction",
"sudden",
"sudoku",
"suds",
"sufferer",
"suffering",
"suffice",
"suffix",
"suffocate",
"suffrage",
"sugar",
"suggest",
"suing",
"suitable",
"suitably",
"suitcase",
"suitor",
"sulfate",
"sulfide",
"sulfite",
"sulfur",
"sulk",
"sullen",
"sulphate",
"sulphuric",
"sultry",
"superbowl",
"superglue",
"superhero",
"superior",
"superjet",
"superman",
"supermom",
"supernova",
"supervise",
"supper",
"supplier",
"supply",
"support",
"supremacy",
"supreme",
"surcharge",
"surely",
"sureness",
"surface",
"surfacing",
"surfboard",
"surfer",
"surgery",
"surgical",
"surging",
"surname",
"surpass",
"surplus",
"surprise",
"surreal",
"surrender",
"surrogate",
"surround",
"survey",
"survival",
"survive",
"surviving",
"survivor",
"sushi",
"suspect",
"suspend",
"suspense",
"sustained",
"sustainer",
"swab",
"swaddling",
"swagger",
"swampland",
"swan",
"swapping",
"swarm",
"sway",
"swear",
"sweat",
"sweep",
"swell",
"swept",
"swerve",
"swifter",
"swiftly",
"swiftness",
"swimmable",
"swimmer",
"swimming",
"swimsuit",
"swimwear",
"swinger",
"swinging",
"swipe",
"swirl",
"switch",
"swivel",
"swizzle",
"swooned",
"swoop",
"swoosh",
"swore",
"sworn",
"swung",
"sycamore",
"sympathy",
"symphonic",
"symphony",
"symptom",
"synapse",
"syndrome",
"synergy",
"synopses",
"synopsis",
"synthesis",
"synthetic",
"syrup",
"system",
"t-shirt",
"tabasco",
"tabby",
"tableful",
"tables",
"tablet",
"tableware",
"tabloid",
"tackiness",
"tacking",
"tackle",
"tackling",
"tacky",
"taco",
"tactful",
"tactical",
"tactics",
"tactile",
"tactless",
"tadpole",
"taekwondo",
"tag",
"tainted",
"take",
"taking",
"talcum",
"talisman",
"tall",
"talon",
"tamale",
"tameness",
"tamer",
"tamper",
"tank",
"tanned",
"tannery",
"tanning",
"tantrum",
"tapeless",
"tapered",
"tapering",
"tapestry",
"tapioca",
"tapping",
"taps",
"tarantula",
"target",
"tarmac",
"tarnish",
"tarot",
"tartar",
"tartly",
"tartness",
"task",
"tassel",
"taste",
"tastiness",
"tasting",
"tasty",
"tattered",
"tattle",
"tattling",
"tattoo",
"taunt",
"tavern",
"thank",
"that",
"thaw",
"theater",
"theatrics",
"thee",
"theft",
"theme",
"theology",
"theorize",
"thermal",
"thermos",
"thesaurus",
"these",
"thesis",
"thespian",
"thicken",
"thicket",
"thickness",
"thieving",
"thievish",
"thigh",
"thimble",
"thing",
"think",
"thinly",
"thinner",
"thinness",
"thinning",
"thirstily",
"thirsting",
"thirsty",
"thirteen",
"thirty",
"thong",
"thorn",
"those",
"thousand",
"thrash",
"thread",
"threaten",
"threefold",
"thrift",
"thrill",
"thrive",
"thriving",
"throat",
"throbbing",
"throng",
"throttle",
"throwaway",
"throwback",
"thrower",
"throwing",
"thud",
"thumb",
"thumping",
"thursday",
"thus",
"thwarting",
"thyself",
"tiara",
"tibia",
"tidal",
"tidbit",
"tidiness",
"tidings",
"tidy",
"tiger",
"tighten",
"tightly",
"tightness",
"tightrope",
"tightwad",
"tigress",
"tile",
"tiling",
"till",
"tilt",
"timid",
"timing",
"timothy",
"tinderbox",
"tinfoil",
"tingle",
"tingling",
"tingly",
"tinker",
"tinkling",
"tinsel",
"tinsmith",
"tint",
"tinwork",
"tiny",
"tipoff",
"tipped",
"tipper",
"tipping",
"tiptoeing",
"tiptop",
"tiring",
"tissue",
"trace",
"tracing",
"track",
"traction",
"tractor",
"trade",
"trading",
"tradition",
"traffic",
"tragedy",
"trailing",
"trailside",
"train",
"traitor",
"trance",
"tranquil",
"transfer",
"transform",
"translate",
"transpire",
"transport",
"transpose",
"trapdoor",
"trapeze",
"trapezoid",
"trapped",
"trapper",
"trapping",
"traps",
"trash",
"travel",
"traverse",
"travesty",
"tray",
"treachery",
"treading",
"treadmill",
"treason",
"treat",
"treble",
"tree",
"trekker",
"tremble",
"trembling",
"tremor",
"trench",
"trend",
"trespass",
"triage",
"trial",
"triangle",
"tribesman",
"tribunal",
"tribune",
"tributary",
"tribute",
"triceps",
"trickery",
"trickily",
"tricking",
"trickle",
"trickster",
"tricky",
"tricolor",
"tricycle",
"trident",
"tried",
"trifle",
"trifocals",
"trillion",
"trilogy",
"trimester",
"trimmer",
"trimming",
"trimness",
"trinity",
"trio",
"tripod",
"tripping",
"triumph",
"trivial",
"trodden",
"trolling",
"trombone",
"trophy",
"tropical",
"tropics",
"trouble",
"troubling",
"trough",
"trousers",
"trout",
"trowel",
"truce",
"truck",
"truffle",
"trump",
"trunks",
"trustable",
"trustee",
"trustful",
"trusting",
"trustless",
"truth",
"try",
"tubby",
"tubeless",
"tubular",
"tucking",
"tuesday",
"tug",
"tuition",
"tulip",
"tumble",
"tumbling",
"tummy",
"turban",
"turbine",
"turbofan",
"turbojet",
"turbulent",
"turf",
"turkey",
"turmoil",
"turret",
"turtle",
"tusk",
"tutor",
"tutu",
"tux",
"tweak",
"tweed",
"tweet",
"tweezers",
"twelve",
"twentieth",
"twenty",
"twerp",
"twice",
"twiddle",
"twiddling",
"twig",
"twilight",
"twine",
"twins",
"twirl",
"twistable",
"twisted",
"twister",
"twisting",
"twisty",
"twitch",
"twitter",
"tycoon",
"tying",
"tyke",
"udder",
"ultimate",
"ultimatum",
"ultra",
"umbilical",
"umbrella",
"umpire",
"unabashed",
"unable",
"unadorned",
"unadvised",
"unafraid",
"unaired",
"unaligned",
"unaltered",
"unarmored",
"unashamed",
"unaudited",
"unawake",
"unaware",
"unbaked",
"unbalance",
"unbeaten",
"unbend",
"unbent",
"unbiased",
"unbitten",
"unblended",
"unblessed",
"unblock",
"unbolted",
"unbounded",
"unboxed",
"unbraided",
"unbridle",
"unbroken",
"unbuckled",
"unbundle",
"unburned",
"unbutton",
"uncanny",
"uncapped",
"uncaring",
"uncertain",
"unchain",
"unchanged",
"uncharted",
"uncheck",
"uncivil",
"unclad",
"unclaimed",
"unclamped",
"unclasp",
"uncle",
"unclip",
"uncloak",
"unclog",
"unclothed",
"uncoated",
"uncoiled",
"uncolored",
"uncombed",
"uncommon",
"uncooked",
"uncork",
"uncorrupt",
"uncounted",
"uncouple",
"uncouth",
"uncover",
"uncross",
"uncrown",
"uncrushed",
"uncured",
"uncurious",
"uncurled",
"uncut",
"undamaged",
"undated",
"undaunted",
"undead",
"undecided",
"undefined",
"underage",
"underarm",
"undercoat",
"undercook",
"undercut",
"underdog",
"underdone",
"underfed",
"underfeed",
"underfoot",
"undergo",
"undergrad",
"underhand",
"underline",
"underling",
"undermine",
"undermost",
"underpaid",
"underpass",
"underpay",
"underrate",
"undertake",
"undertone",
"undertook",
"undertow",
"underuse",
"underwear",
"underwent",
"underwire",
"undesired",
"undiluted",
"undivided",
"undocked",
"undoing",
"undone",
"undrafted",
"undress",
"undrilled",
"undusted",
"undying",
"unearned",
"unearth",
"unease",
"uneasily",
"uneasy",
"uneatable",
"uneaten",
"unedited",
"unelected",
"unending",
"unengaged",
"unenvied",
"unequal",
"unethical",
"uneven",
"unexpired",
"unexposed",
"unfailing",
"unfair",
"unfasten",
"unfazed",
"unfeeling",
"unfiled",
"unfilled",
"unfitted",
"unfitting",
"unfixable",
"unfixed",
"unflawed",
"unfocused",
"unfold",
"unfounded",
"unframed",
"unfreeze",
"unfrosted",
"unfrozen",
"unfunded",
"unglazed",
"ungloved",
"unglue",
"ungodly",
"ungraded",
"ungreased",
"unguarded",
"unguided",
"unhappily",
"unhappy",
"unharmed",
"unhealthy",
"unheard",
"unhearing",
"unheated",
"unhelpful",
"unhidden",
"unhinge",
"unhitched",
"unholy",
"unhook",
"unicorn",
"unicycle",
"unified",
"unifier",
"uniformed",
"uniformly",
"unify",
"unimpeded",
"uninjured",
"uninstall",
"uninsured",
"uninvited",
"union",
"uniquely",
"unisexual",
"unison",
"unissued",
"unit",
"universal",
"universe",
"unjustly",
"unkempt",
"unkind",
"unknotted",
"unknowing",
"unknown",
"unlaced",
"unlatch",
"unlawful",
"unleaded",
"unlearned",
"unleash",
"unless",
"unleveled",
"unlighted",
"unlikable",
"unlimited",
"unlined",
"unlinked",
"unlisted",
"unlit",
"unlivable",
"unloaded",
"unloader",
"unlocked",
"unlocking",
"unlovable",
"unloved",
"unlovely",
"unloving",
"unluckily",
"unlucky",
"unmade",
"unmanaged",
"unmanned",
"unmapped",
"unmarked",
"unmasked",
"unmasking",
"unmatched",
"unmindful",
"unmixable",
"unmixed",
"unmolded",
"unmoral",
"unmovable",
"unmoved",
"unmoving",
"unnamable",
"unnamed",
"unnatural",
"unneeded",
"unnerve",
"unnerving",
"unnoticed",
"unopened",
"unopposed",
"unpack",
"unpadded",
"unpaid",
"unpainted",
"unpaired",
"unpaved",
"unpeeled",
"unpicked",
"unpiloted",
"unpinned",
"unplanned",
"unplanted",
"unpleased",
"unpledged",
"unplowed",
"unplug",
"unpopular",
"unproven",
"unquote",
"unranked",
"unrated",
"unraveled",
"unreached",
"unread",
"unreal",
"unreeling",
"unrefined",
"unrelated",
"unrented",
"unrest",
"unretired",
"unrevised",
"unrigged",
"unripe",
"unrivaled",
"unroasted",
"unrobed",
"unroll",
"unruffled",
"unruly",
"unrushed",
"unsaddle",
"unsafe",
"unsaid",
"unsalted",
"unsaved",
"unsavory",
"unscathed",
"unscented",
"unscrew",
"unsealed",
"unseated",
"unsecured",
"unseeing",
"unseemly",
"unseen",
"unselect",
"unselfish",
"unsent",
"unsettled",
"unshackle",
"unshaken",
"unshaved",
"unshaven",
"unsheathe",
"unshipped",
"unsightly",
"unsigned",
"unskilled",
"unsliced",
"unsmooth",
"unsnap",
"unsocial",
"unsoiled",
"unsold",
"unsolved",
"unsorted",
"unspoiled",
"unspoken",
"unstable",
"unstaffed",
"unstamped",
"unsteady",
"unsterile",
"unstirred",
"unstitch",
"unstopped",
"unstuck",
"unstuffed",
"unstylish",
"unsubtle",
"unsubtly",
"unsuited",
"unsure",
"unsworn",
"untagged",
"untainted",
"untaken",
"untamed",
"untangled",
"untapped",
"untaxed",
"unthawed",
"unthread",
"untidy",
"untie",
"until",
"untimed",
"untimely",
"untitled",
"untoasted",
"untold",
"untouched",
"untracked",
"untrained",
"untreated",
"untried",
"untrimmed",
"untrue",
"untruth",
"unturned",
"untwist",
"untying",
"unusable",
"unused",
"unusual",
"unvalued",
"unvaried",
"unvarying",
"unveiled",
"unveiling",
"unvented",
"unviable",
"unvisited",
"unvocal",
"unwanted",
"unwarlike",
"unwary",
"unwashed",
"unwatched",
"unweave",
"unwed",
"unwelcome",
"unwell",
"unwieldy",
"unwilling",
"unwind",
"unwired",
"unwitting",
"unwomanly",
"unworldly",
"unworn",
"unworried",
"unworthy",
"unwound",
"unwoven",
"unwrapped",
"unwritten",
"unzip",
"upbeat",
"upchuck",
"upcoming",
"upcountry",
"update",
"upfront",
"upgrade",
"upheaval",
"upheld",
"uphill",
"uphold",
"uplifted",
"uplifting",
"upload",
"upon",
"upper",
"upright",
"uprising",
"upriver",
"uproar",
"uproot",
"upscale",
"upside",
"upstage",
"upstairs",
"upstart",
"upstate",
"upstream",
"upstroke",
"upswing",
"uptake",
"uptight",
"uptown",
"upturned",
"upward",
"upwind",
"uranium",
"urban",
"urchin",
"urethane",
"urgency",
"urgent",
"urging",
"urologist",
"urology",
"usable",
"usage",
"useable",
"used",
"uselessly",
"user",
"usher",
"usual",
"utensil",
"utility",
"utilize",
"utmost",
"utopia",
"utter",
"vacancy",
"vacant",
"vacate",
"vacation",
"vagabond",
"vagrancy",
"vagrantly",
"vaguely",
"vagueness",
"valiant",
"valid",
"valium",
"valley",
"valuables",
"value",
"vanilla",
"vanish",
"vanity",
"vanquish",
"vantage",
"vaporizer",
"variable",
"variably",
"varied",
"variety",
"various",
"varmint",
"varnish",
"varsity",
"varying",
"vascular",
"vaseline",
"vastly",
"vastness",
"veal",
"vegan",
"veggie",
"vehicular",
"velcro",
"velocity",
"velvet",
"vendetta",
"vending",
"vendor",
"veneering",
"vengeful",
"venomous",
"ventricle",
"venture",
"venue",
"venus",
"verbalize",
"verbally",
"verbose",
"verdict",
"verify",
"verse",
"version",
"versus",
"vertebrae",
"vertical",
"vertigo",
"very",
"vessel",
"vest",
"veteran",
"veto",
"vexingly",
"viability",
"viable",
"vibes",
"vice",
"vicinity",
"victory",
"video",
"viewable",
"viewer",
"viewing",
"viewless",
"viewpoint",
"vigorous",
"village",
"villain",
"vindicate",
"vineyard",
"vintage",
"violate",
"violation",
"violator",
"violet",
"violin",
"viper",
"viral",
"virtual",
"virtuous",
"virus",
"visa",
"viscosity",
"viscous",
"viselike",
"visible",
"visibly",
"vision",
"visiting",
"visitor",
"visor",
"vista",
"vitality",
"vitalize",
"vitally",
"vitamins",
"vivacious",
"vividly",
"vividness",
"vixen",
"vocalist",
"vocalize",
"vocally",
"vocation",
"voice",
"voicing",
"void",
"volatile",
"volley",
"voltage",
"volumes",
"voter",
"voting",
"voucher",
"vowed",
"vowel",
"voyage",
"wackiness",
"wad",
"wafer",
"waffle",
"waged",
"wager",
"wages",
"waggle",
"wagon",
"wake",
"waking",
"walk",
"walmart",
"walnut",
"walrus",
"waltz",
"wand",
"wannabe",
"wanted",
"wanting",
"wasabi",
"washable",
"washbasin",
"washboard",
"washbowl",
"washcloth",
"washday",
"washed",
"washer",
"washhouse",
"washing",
"washout",
"washroom",
"washstand",
"washtub",
"wasp",
"wasting",
"watch",
"water",
"waviness",
"waving",
"wavy",
"whacking",
"whacky",
"wham",
"wharf",
"wheat",
"whenever",
"whiff",
"whimsical",
"whinny",
"whiny",
"whisking",
"whoever",
"whole",
"whomever",
"whoopee",
"whooping",
"whoops",
"why",
"wick",
"widely",
"widen",
"widget",
"widow",
"width",
"wieldable",
"wielder",
"wife",
"wifi",
"wikipedia",
"wildcard",
"wildcat",
"wilder",
"wildfire",
"wildfowl",
"wildland",
"wildlife",
"wildly",
"wildness",
"willed",
"willfully",
"willing",
"willow",
"willpower",
"wilt",
"wimp",
"wince",
"wincing",
"wind",
"wing",
"winking",
"winner",
"winnings",
"winter",
"wipe",
"wired",
"wireless",
"wiring",
"wiry",
"wisdom",
"wise",
"wish",
"wisplike",
"wispy",
"wistful",
"wizard",
"wobble",
"wobbling",
"wobbly",
"wok",
"wolf",
"wolverine",
"womanhood",
"womankind",
"womanless",
"womanlike",
"womanly",
"womb",
"woof",
"wooing",
"wool",
"woozy",
"word",
"work",
"worried",
"worrier",
"worrisome",
"worry",
"worsening",
"worshiper",
"worst",
"wound",
"woven",
"wow",
"wrangle",
"wrath",
"wreath",
"wreckage",
"wrecker",
"wrecking",
"wrench",
"wriggle",
"wriggly",
"wrinkle",
"wrinkly",
"wrist",
"writing",
"written",
"wrongdoer",
"wronged",
"wrongful",
"wrongly",
"wrongness",
"wrought",
"xbox",
"xerox",
"yahoo",
"yam",
"yanking",
"yapping",
"yard",
"yarn",
"yeah",
"yearbook",
"yearling",
"yearly",
"yearning",
"yeast",
"yelling",
"yelp",
"yen",
"yesterday",
"yiddish",
"yield",
"yin",
"yippee",
"yo-yo",
"yodel",
"yoga",
"yogurt",
"yonder",
"yoyo",
"yummy",
"zap",
"zealous",
"zebra",
"zen",
"zeppelin",
"zero",
"zestfully",
"zesty",
"zigzagged",
"zipfile",
"zipping",
"zippy",
"zips",
"zit",
"zodiac",
"zombie",
"zone",
"zoning",
"zookeeper",
"zoologist",
"zoology",
"zoom",
]
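# --- Hedged usage sketch (not part of the original wordlist module) ----------
# A minimal, self-contained illustration of how a wordlist like the one above
# can drive diceware-style passphrase generation. The helper name and the tiny
# sample list below are stand-ins for illustration only; in practice the full
# list defined above would be passed in.
import secrets
def passphrase(words, n=6, sep=" "):
    """Return an n-word passphrase drawn with a cryptographically secure RNG."""
    return sep.join(secrets.choice(words) for _ in range(n))
if __name__ == "__main__":
    sample = ["swerve", "tadpole", "uncork", "velvet", "wharf", "zodiac"]
    print(passphrase(sample))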
|
brycedrennan/pwdgen
|
pwdgen/eff_wordlist.py
|
Python
|
mit
| 116,595
|
[
"Amber",
"BLAST",
"CASINO",
"CRYSTAL",
"ESPResSo",
"Elk",
"GULP",
"NEURON",
"Octopus",
"SIESTA",
"TINKER",
"exciting"
] |
5afa5a51c2c2b38497322e72a2bb767ddda9ec6156b76c11e679ded79ba09d24
|
# Copyright (c) 2014, James Hensman, Alex Matthews
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..util import choleskies
from .sparse_gp import SparseGP
from .parameterization.param import Param
from ..inference.latent_function_inference.svgp import SVGP as svgp_inf
class SVGP(SparseGP):
def __init__(self, X, Y, Z, kernel, likelihood, mean_function=None, name='SVGP', Y_metadata=None, batchsize=None, num_latent_functions=None):
"""
Stochastic Variational GP.
For Gaussian likelihoods, this implements
Gaussian Processes for Big Data, Hensman, Fusi and Lawrence, UAI 2013,
but without natural gradients. We use the lower-triangular
representation of the covariance matrix to ensure
positive-definiteness.
For non-Gaussian likelihoods, this implements
Hensman, Matthews and Ghahramani, Scalable Variational GP Classification, arXiv 1411.2005.
"""
self.batchsize = batchsize
self.X_all, self.Y_all = X, Y
if batchsize is None:
X_batch, Y_batch = X, Y
else:
import climin.util
#Make a climin slicer to make drawing minibatches much quicker
self.slicer = climin.util.draw_mini_slices(self.X_all.shape[0], self.batchsize)
X_batch, Y_batch = self.new_batch()
#create the SVI inference method
inf_method = svgp_inf()
SparseGP.__init__(self, X_batch, Y_batch, Z, kernel, likelihood, mean_function=mean_function, inference_method=inf_method,
name=name, Y_metadata=Y_metadata, normalizer=False)
#assume the number of latent functions is one per col of Y unless specified
if num_latent_functions is None:
num_latent_functions = Y.shape[1]
self.m = Param('q_u_mean', np.zeros((self.num_inducing, num_latent_functions)))
chol = choleskies.triang_to_flat(np.tile(np.eye(self.num_inducing)[None,:,:], (num_latent_functions, 1,1)))
self.chol = Param('q_u_chol', chol)
self.link_parameter(self.chol)
self.link_parameter(self.m)
def parameters_changed(self):
self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.q_u_mean, self.q_u_chol, self.kern, self.X, self.Z, self.likelihood, self.Y, self.mean_function, self.Y_metadata, KL_scale=1.0, batch_scale=float(self.X_all.shape[0])/float(self.X.shape[0]))
#update the kernel gradients
self.kern.update_gradients_full(self.grad_dict['dL_dKmm'], self.Z)
grad = self.kern.gradient.copy()
self.kern.update_gradients_full(self.grad_dict['dL_dKmn'], self.Z, self.X)
grad += self.kern.gradient.copy()
self.kern.update_gradients_diag(self.grad_dict['dL_dKdiag'], self.X)
self.kern.gradient += grad
if not self.Z.is_fixed:# only compute these expensive gradients if we need them
self.Z.gradient = self.kern.gradients_X(self.grad_dict['dL_dKmm'], self.Z) + self.kern.gradients_X(self.grad_dict['dL_dKmn'], self.Z, self.X)
self.likelihood.update_gradients(self.grad_dict['dL_dthetaL'])
#update the variational parameter gradients:
self.m.gradient = self.grad_dict['dL_dm']
self.chol.gradient = self.grad_dict['dL_dchol']
if self.mean_function is not None:
self.mean_function.update_gradients(self.grad_dict['dL_dmfX'], self.X)
g = self.mean_function.gradient[:].copy()
self.mean_function.update_gradients(self.grad_dict['dL_dmfZ'], self.Z)
self.mean_function.gradient[:] += g
self.Z.gradient[:] += self.mean_function.gradients_X(self.grad_dict['dL_dmfZ'], self.Z)
def set_data(self, X, Y):
"""
Set the data without calling parameters_changed, to avoid wasted computation.
If this is called by the stochastic_grad function, it will immediately update the gradients.
"""
assert X.shape[1]==self.Z.shape[1]
self.X, self.Y = X, Y
def new_batch(self):
"""
Return a new batch of X and Y by taking a chunk of data from the complete X and Y
"""
i = next(self.slicer)  # advance the minibatch slicer (works on Python 2 and 3)
return self.X_all[i], self.Y_all[i]
def stochastic_grad(self, parameters):
self.set_data(*self.new_batch())
return self._grads(parameters)
def optimizeWithFreezingZ(self):
self.Z.fix()
self.kern.fix()
self.optimize('bfgs')
self.Z.unfix()
self.kern.constrain_positive()
self.optimize('bfgs')
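# --- Hedged usage sketch (not part of the original GPy source) ---------------
# A minimal illustration of how this class might be driven; the toy data, the
# RBF kernel choice, and the climin.Adadelta call are assumptions rather than
# GPy defaults. The snippet is meant to be run in a separate script with GPy
# and climin installed (this module itself relies on relative imports), so it
# is shown here as a comment:
#     import numpy as np
#     import GPy
#     import climin
#     X = np.random.rand(500, 1)
#     Y = np.sin(6 * X) + 0.1 * np.random.randn(500, 1)
#     Z = np.random.rand(20, 1)  # inducing inputs
#     m = GPy.core.SVGP(X, Y, Z, GPy.kern.RBF(1), GPy.likelihoods.Gaussian(),
#                       batchsize=100)
#     opt = climin.Adadelta(m.optimizer_array, m.stochastic_grad, step_rate=0.2)
#     for info in opt:
#         if info['n_iter'] >= 1000:
#             break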
|
ptonner/GPy
|
GPy/core/svgp.py
|
Python
|
bsd-3-clause
| 4,620
|
[
"Gaussian"
] |
37e86408ab030714e976a88b33e92fe9043593865aed87c446e9725ad5b1a175
|
# Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# generate_ufuncs.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
from __future__ import division, print_function, absolute_import
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
add_newdoc("scipy.special", "sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
.. math:: Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi}\frac{(n-m)!}{(n+m)!}} e^{i m \theta} P^m_n(\cos(\phi))
Parameters
----------
m : int
``|m| <= n``; the order of the harmonic.
n : int
where `n` >= 0; the degree of the harmonic. This is often called
``l`` (lower case L) in descriptions of spherical harmonics.
theta : float
[0, 2*pi]; the azimuthal (longitudinal) coordinate.
phi : float
[0, pi]; the polar (colatitudinal) coordinate.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at `theta` and `phi`
Notes
-----
There are different conventions for the meaning of input arguments
`theta` and `phi`. We take `theta` to be the azimuthal angle and
`phi` to be the polar angle. It is common to see the opposite
convention - that is `theta` as the polar angle and `phi` as the
azimuthal angle.
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30. http://dlmf.nist.gov/14.30
""")
add_newdoc("scipy.special", "_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("scipy.special", "_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("scipy.special", "_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("scipy.special", "airy",
"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : float or complex
Argument.
Returns
-------
Ai, Aip, Bi, Bip
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
The Airy functions Ai and Bi are two independent solutions of y''(x) = x y.
""")
add_newdoc("scipy.special", "airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs((2.0/3.0*z*sqrt(z)).real))
eBip = Bip * exp(-abs((2.0/3.0*z*sqrt(z)).real))
Parameters
----------
z : float or complex
Argument.
Returns
-------
eAi, eAip, eBi, eBip
Airy functions Ai and Bi, and their derivatives Aip and Bip
""")
add_newdoc("scipy.special", "bdtr",
"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through k of the Binomial probability density.
::
y = sum(nCj p**j (1-p)**(n-j),j=0..k)
Parameters
----------
k, n : int
Terms to include
p : float
Probability
Returns
-------
y : float
Sum of terms
""")
add_newdoc("scipy.special", "bdtrc",
"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms k+1 through n of the Binomial probability density
::
y = sum(nCj p**j (1-p)**(n-j), j=k+1..n)
Parameters
----------
k, n : int
Terms to include
p : float
Probability
Returns
-------
y : float
Sum of terms
""")
add_newdoc("scipy.special", "bdtri",
"""
bdtri(k, n, y)
Inverse function to bdtr vs. p
Finds probability `p` such that for the cumulative binomial
probability ``bdtr(k, n, p) == y``.
""")
add_newdoc("scipy.special", "bdtrik",
"""
bdtrik(y, n, p)
Inverse function to bdtr vs k
""")
add_newdoc("scipy.special", "bdtrin",
"""
bdtrin(k, y, p)
Inverse function to bdtr vs n
""")
add_newdoc("scipy.special", "binom",
"""
binom(n, k)
Binomial coefficient
""")
add_newdoc("scipy.special", "btdtria",
"""
btdtria(p, b, x)
Inverse of btdtr vs a
""")
add_newdoc("scipy.special", "btdtrib",
"""
btdtrib(a, p, x)
Inverse of btdtr vs b
""")
add_newdoc("scipy.special", "bei",
"""
bei(x)
Kelvin function bei
""")
add_newdoc("scipy.special", "beip",
"""
beip(x)
Derivative of the Kelvin function bei
""")
add_newdoc("scipy.special", "ber",
"""
ber(x)
Kelvin function ber.
""")
add_newdoc("scipy.special", "berp",
"""
berp(x)
Derivative of the Kelvin function ber
""")
add_newdoc("scipy.special", "besselpoly",
r"""
besselpoly(a, lmb, nu)
Weighted integral of a Bessel function.
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
""")
add_newdoc("scipy.special", "beta",
"""
beta(a, b)
Beta function.
::
beta(a,b) = gamma(a) * gamma(b) / gamma(a+b)
""")
add_newdoc("scipy.special", "betainc",
"""
betainc(a, b, x)
Incomplete beta integral.
Compute the incomplete beta integral of the arguments, evaluated
from zero to x::
gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x).
Notes
-----
The incomplete beta is also sometimes defined without the terms
in gamma, in which case the above definition is the so-called regularized
incomplete beta. Under this definition, you can get the incomplete beta by
multiplying the result of the scipy function by beta(a, b).
""")
add_newdoc("scipy.special", "betaincinv",
"""
betaincinv(a, b, y)
Inverse function to beta integral.
Compute x such that betainc(a,b,x) = y.
""")
add_newdoc("scipy.special", "betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(a, b)))``.
""")
add_newdoc("scipy.special", "boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("scipy.special", "boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox1p
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("scipy.special", "inv_boxcox",
"""
inv_boxcox(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox, inv_boxcox
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox1p, inv_boxcox1p
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "btdtr",
"""
btdtr(a,b,x)
Cumulative beta distribution.
Returns the area from zero to x under the beta density function::
gamma(a+b)/(gamma(a)*gamma(b)))*integral(t**(a-1) (1-t)**(b-1), t=0..x)
See Also
--------
betainc
""")
add_newdoc("scipy.special", "btdtri",
"""
btdtri(a,b,p)
p-th quantile of the beta distribution.
This is effectively the inverse of btdtr returning the value of x for which
``btdtr(a,b,x) = p``
See Also
--------
betaincinv
""")
add_newdoc("scipy.special", "cbrt",
"""
cbrt(x)
Cube root of x
""")
add_newdoc("scipy.special", "chdtr",
"""
chdtr(v, x)
Chi square cumulative distribution function
Returns the area under the left hand tail (from 0 to x) of the Chi
square probability density function with v degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)
""")
add_newdoc("scipy.special", "chdtrc",
"""
chdtrc(v,x)
Chi square survival function
Returns the area under the right hand tail (from x to
infinity) of the Chi square probability density function with v
degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)
""")
add_newdoc("scipy.special", "chdtri",
"""
chdtri(v,p)
Inverse to chdtrc
Returns the argument x such that ``chdtrc(v,x) == p``.
""")
add_newdoc("scipy.special", "chdtriv",
"""
chdtriv(p, x)
Inverse to chdtr vs v
Returns the argument v such that ``chdtr(v, x) == p``.
""")
add_newdoc("scipy.special", "chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("scipy.special", "chndtrix",
"""
chndtrix(p, df, nc)
Inverse to chndtr vs x
""")
add_newdoc("scipy.special", "chndtridf",
"""
chndtridf(x, p, nc)
Inverse to chndtr vs df
""")
add_newdoc("scipy.special", "chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to chndtr vs nc
""")
add_newdoc("scipy.special", "cosdg",
"""
cosdg(x)
Cosine of the angle x given in degrees.
""")
add_newdoc("scipy.special", "cosm1",
"""
cosm1(x)
cos(x) - 1 for use when x is near zero.
""")
add_newdoc("scipy.special", "cotdg",
"""
cotdg(x)
Cotangent of the angle x given in degrees.
""")
add_newdoc("scipy.special", "dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2),t=0..x).
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "ellipe",
"""
ellipe(m)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipeinc",
"""
ellipeinc(phi, m)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter m between
0 and 1, and real u.
Parameters
----------
m, u
Parameters
Returns
-------
sn, cn, dn, ph
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
The value ``ph`` is such that if ``u = ellipkinc(ph, m)``,
then ``sn(u|m) = sin(ph)`` and ``cn(u|m) = cos(ph)``.
""")
add_newdoc("scipy.special", "ellipkm1",
"""
ellipkm1(p)
Complete elliptic integral of the first kind around m = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as m = 1 - p.
Returns
-------
K : ndarray
Value of the elliptic integral.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipkinc",
"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{-1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
Returns
-------
K : ndarray
Value of the elliptic integral
Notes
-----
This function is also called ``F(phi, m)``.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "entr",
r"""
entr(x)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The value of the elementwise entropy function at the given points x.
See Also
--------
kl_div, rel_entr
Notes
-----
This function is concave.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points x.
See Also
--------
erfc, erfinv, erfcinv
Notes
-----
The cumulative of the unit normal distribution is given by
``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] http://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfc",
"""
erfc(x)
Complementary error function, 1 - erf(x).
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfi",
"""
erfi(z)
Imaginary error function, -i erf(i z).
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfcx",
"""
erfcx(x)
Scaled complementary error function, exp(x^2) erfc(x).
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "eval_jacobi",
"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_jacobi",
"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_gegenbauer",
"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyt",
"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev T polynomial at a point.
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
""")
add_newdoc("scipy.special", "eval_chebyu",
"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebys",
"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev S polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyc",
"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev C polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyt",
"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev T polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyu",
"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_legendre",
"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_legendre",
"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_genlaguerre",
"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_laguerre",
"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermite",
"""
eval_hermite(n, x, out=None)
Evaluate Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermitenorm",
"""
eval_hermitenorm(n, x, out=None)
Evaluate normalized Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "exp1",
"""
exp1(z)
Exponential integral E_1 of complex argument z
::
integral(exp(-z*t)/t,t=1..inf).
""")
add_newdoc("scipy.special", "exp10",
"""
exp10(x)
10**x
""")
add_newdoc("scipy.special", "exp2",
"""
exp2(x)
2**x
""")
add_newdoc("scipy.special", "expi",
"""
expi(x)
Exponential integral Ei
Defined as::
integral(exp(t)/t,t=-inf..x)
See `expn` for a different exponential integral.
""")
add_newdoc('scipy.special', 'expit',
"""
expit(x)
Expit ufunc for ndarrays.
The expit function, also known as the logistic function, is defined as
expit(x) = 1/(1+exp(-x)). It is the inverse of the logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are expit of the corresponding entry of x.
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "expm1",
"""
expm1(x)
exp(x) - 1 for use when x is near zero.
""")
add_newdoc("scipy.special", "expn",
"""
expn(n, x)
Exponential integral E_n
Returns the exponential integral for integer n and non-negative x and n::
integral(exp(-x*t) / t**n, t=1..inf).
""")
add_newdoc("scipy.special", "exprel",
r"""
exprel(x)
Relative error exponential, (exp(x)-1)/x, for use when x is near zero.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
expm1
.. versionadded:: 0.17.0
""")
add_newdoc("scipy.special", "fdtr",
"""
fdtr(dfn, dfd, x)
F cumulative distribution function
Returns the area from zero to x under the F density function (also
known as Snedecor's density or the variance ratio density). This
is the density of X = (unum/dfn)/(uden/dfd), where unum and uden
are random variables having Chi square distributions with dfn and
dfd degrees of freedom, respectively.
""")
add_newdoc("scipy.special", "fdtrc",
"""
fdtrc(dfn, dfd, x)
F survival function
Returns the complemented F distribution function.
""")
add_newdoc("scipy.special", "fdtri",
"""
fdtri(dfn, dfd, p)
Inverse to fdtr vs x
Finds the F density argument x such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to fdtr vs dfd
Finds the F density argument dfd such that ``fdtr(dfn,dfd,x) == p``.
""")
add_newdoc("scipy.special", "fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to fdtr vs dfn
Finds the F density argument dfn such that ``fdtr(dfn,dfd,x) == p``.
""")
add_newdoc("scipy.special", "fresnel",
"""
fresnel(z)
Fresnel sin and cos integrals
Defined as::
ssa = integral(sin(pi/2 * t**2),t=0..z)
csa = integral(cos(pi/2 * t**2),t=0..z)
Parameters
----------
z : float or complex array_like
Argument
Returns
-------
ssa, csa
Fresnel sin and cos integral values
""")
add_newdoc("scipy.special", "gamma",
"""
gamma(z)
Gamma function
The gamma function is often referred to as the generalized
factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) =
n!`` for natural number *n*.
""")
add_newdoc("scipy.special", "gammainc",
"""
gammainc(a, x)
Incomplete gamma function
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=0..x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammaincc",
"""
gammaincc(a,x)
Complemented incomplete gamma integral
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=x..inf) = 1 - gammainc(a,x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammainccinv",
"""
gammainccinv(a,y)
Inverse to gammaincc
Returns `x` such that ``gammaincc(a,x) == y``.
""")
add_newdoc("scipy.special", "gammaincinv",
"""
gammaincinv(a, y)
Inverse to gammainc
Returns `x` such that ``gammainc(a, x) = y``.
""")
add_newdoc("scipy.special", "gammaln",
"""
gammaln(z)
Logarithm of absolute value of gamma function
Defined as::
ln(abs(gamma(z)))
See Also
--------
gammasgn
""")
add_newdoc("scipy.special", "gammasgn",
"""
gammasgn(x)
Sign of the gamma function.
See Also
--------
gammaln
""")
add_newdoc("scipy.special", "gdtr",
"""
gdtr(a,b,x)
Gamma distribution cumulative distribution function.
Returns the integral from zero to x of the gamma probability
density function::
a**b / gamma(b) * integral(t**(b-1) exp(-at),t=0..x).
The arguments a and b are used differently here than in other
definitions.
""")
add_newdoc("scipy.special", "gdtrc",
"""
gdtrc(a,b,x)
Gamma distribution survival function.
Integral from x to infinity of the gamma probability density
function.
See Also
--------
gdtr, gdtri
""")
add_newdoc("scipy.special", "gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of gdtr vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtria
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("scipy.special", "gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of gdtr vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrib
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("scipy.special", "gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of gdtr vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the p'th quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrix
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("scipy.special", "hankel1",
"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : float
Order
z : float or complex
Argument
""")
add_newdoc("scipy.special", "hankel1e",
"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v,z) = hankel1(v,z) * exp(-1j * z)
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hankel2",
"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hankel2e",
"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
hankel2e(v,z) = hankel2(v,z) * exp(1j * z)
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "huber",
r"""
huber(delta, r)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "hyp1f1",
"""
hyp1f1(a, b, x)
Confluent hypergeometric function 1F1(a, b; x)
""")
add_newdoc("scipy.special", "hyp1f2",
"""
hyp1f2(a, b, c, x)
Hypergeometric function 1F2 and error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f0",
"""
hyp2f0(a, b, x, type)
Hypergeometric function 2F0 in y and an error estimate
The parameter `type` determines a convergence factor and can be
either 1 or 2.
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f1",
"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z).
""")
add_newdoc("scipy.special", "hyp3f0",
"""
hyp3f0(a, b, c, x)
Hypergeometric function 3F0 in y and an error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyperu",
"""
hyperu(a, b, x)
Confluent hypergeometric function U(a, b, x) of the second kind
""")
add_newdoc("scipy.special", "i0",
"""
i0(x)
Modified Bessel function of order 0
""")
add_newdoc("scipy.special", "i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
""")
add_newdoc("scipy.special", "i1",
"""
i1(x)
Modified Bessel function of order 1
""")
add_newdoc("scipy.special", "i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
""")
add_newdoc("scipy.special", "it2i0k0",
"""
it2i0k0(x)
Integrals related to modified Bessel functions of order 0
Returns
-------
ii0
``integral((i0(t)-1)/t, t=0..x)``
ik0
``int(k0(t)/t,t=x..inf)``
""")
add_newdoc("scipy.special", "it2j0y0",
"""
it2j0y0(x)
Integrals related to Bessel functions of order 0
Returns
-------
ij0
``integral((1-j0(t))/t, t=0..x)``
iy0
``integral(y0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2struve0",
"""
it2struve0(x)
Integral related to Struve function of order 0
Returns
-------
i
``integral(H0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "itairy",
"""
itairy(x)
Integrals of Airy functions
Calculates the integral of Airy functions from 0 to x
Returns
-------
Apt, Bpt
Integrals for positive arguments
Ant, Bnt
Integrals for negative arguments
""")
add_newdoc("scipy.special", "iti0k0",
"""
iti0k0(x)
Integrals of modified Bessel functions of order 0
Returns simple integrals from 0 to x of the zeroth order modified
Bessel functions i0 and k0.
Returns
-------
ii0, ik0
""")
add_newdoc("scipy.special", "itj0y0",
"""
itj0y0(x)
Integrals of Bessel functions of order 0
Returns simple integrals from 0 to x of the zeroth order Bessel
functions j0 and y0.
Returns
-------
ij0, iy0
""")
add_newdoc("scipy.special", "itmodstruve0",
"""
itmodstruve0(x)
Integral of the modified Struve function of order 0
Returns
-------
i
``integral(L0(t), t=0..x)``
""")
add_newdoc("scipy.special", "itstruve0",
"""
itstruve0(x)
Integral of the Struve function of order 0
Returns
-------
i
``integral(H0(t), t=0..x)``
""")
add_newdoc("scipy.special", "iv",
"""
iv(v,z)
Modified Bessel function of the first kind of real order
Parameters
----------
v
Order. If z is of real type and negative, v must be integer valued.
z
Argument.
""")
add_newdoc("scipy.special", "ive",
"""
ive(v,z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v,z) = iv(v,z) * exp(-abs(z.real))
""")
add_newdoc("scipy.special", "j0",
"""
j0(x)
Bessel function of the first kind of order 0
""")
add_newdoc("scipy.special", "j1",
"""
j1(x)
Bessel function of the first kind of order 1
""")
add_newdoc("scipy.special", "jn",
"""
jn(n, x)
Bessel function of the first kind of integer order n.
Notes
-----
`jn` is an alias of `jv`.
""")
add_newdoc("scipy.special", "jv",
"""
jv(v, z)
Bessel function of the first kind of real order v
""")
add_newdoc("scipy.special", "jve",
"""
jve(v, z)
Exponentially scaled Bessel function of order v
Defined as::
jve(v,z) = jv(v,z) * exp(-abs(z.imag))
""")
add_newdoc("scipy.special", "k0",
"""
k0(x)
Modified Bessel function K of order 0
Modified Bessel function of the second kind (sometimes called the
third kind) of order 0.
""")
add_newdoc("scipy.special", "k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
""")
add_newdoc("scipy.special", "k1",
"""
k1(x)
Modified Bessel function of the second kind of order 1
""")
add_newdoc("scipy.special", "k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
""")
add_newdoc("scipy.special", "kei",
"""
kei(x)
Kelvin function kei
""")
add_newdoc("scipy.special", "keip",
"""
keip(x)
Derivative of the Kelvin function kei
""")
add_newdoc("scipy.special", "kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at x. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
add_newdoc("scipy.special", "ker",
"""
ker(x)
Kelvin function ker
""")
add_newdoc("scipy.special", "kerp",
"""
kerp(x)
Derivative of the Kelvin function ker
""")
add_newdoc("scipy.special", "kl_div",
r"""
kl_div(x, y)
Elementwise function for computing Kullback-Leibler divergence.
.. math:: \mathrm{kl\_div}(x, y) = \begin{cases} x \log(x / y) - x + y & x > 0, y > 0 \\ y & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, rel_entr
Notes
-----
This function is non-negative and is jointly convex in x and y.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "kn",
"""
kn(n, x)
Modified Bessel function of the second kind of integer order n
These are also sometimes called functions of the third kind.
""")
add_newdoc("scipy.special", "kolmogi",
"""
kolmogi(p)
Inverse function to kolmogorov
Returns y such that ``kolmogorov(y) == p``.
""")
add_newdoc("scipy.special", "kolmogorov",
"""
kolmogorov(y)
Complementary cumulative distribution function of Kolmogorov distribution
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (Kn* for large n) of a
two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that sqrt(n) * max absolute deviation > y.
""")
add_newdoc("scipy.special", "kv",
"""
kv(v,z)
Modified Bessel function of the second kind of real order v
Returns the modified Bessel function of the second kind (sometimes
called the third kind) for real order v at complex z.
""")
add_newdoc("scipy.special", "kve",
"""
kve(v,z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order v at
complex z::
kve(v,z) = kv(v,z) * exp(z)
""")
add_newdoc("scipy.special", "log1p",
"""
log1p(x)
Calculates log(1+x) for use when x is near zero
""")
add_newdoc('scipy.special', 'logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "lpmv",
"""
lpmv(m, v, x)
Associated Legendre function of integer order.
Parameters
----------
m : int
Order
v : real
Degree. Must be ``v>-m-1`` or ``v<m``
x : complex
Argument. Must be ``|x| <= 1``.
""")
add_newdoc("scipy.special", "mathieu_a",
"""
mathieu_a(m,q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z,q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_b",
"""
mathieu_b(m,q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z,q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_cem",
"""
mathieu_cem(m,q,x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x,q)``, of order m and
parameter q evaluated at x (given in degrees). Also returns the
derivative with respect to x of ce_m(x,q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x,q)``, and its derivative at `x` for order m and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem1",
"""
mathieu_modsem1(m,q,x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x,q), of order m and
parameter q evaluated at x (given in degrees). Also returns the
derivative with respect to x of se_m(x,q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t),t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t),t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modstruve",
"""
modstruve(v, x)
Modified Struve function
Returns the modified Struve function Lv(x) of order v at x; x must
be positive unless v is an integer.
""")
add_newdoc("scipy.special", "nbdtr",
"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function
Returns the sum of the terms 0 through k of the negative binomial
distribution::
sum((n+j-1)Cj p**n (1-p)**j,j=0..k).
In a sequence of Bernoulli trials this is the probability that k
or fewer failures precede the nth success.
""")
add_newdoc("scipy.special", "nbdtrc",
"""
nbdtrc(k,n,p)
Negative binomial survival function
Returns the sum of the terms k+1 to infinity of the negative
binomial distribution.
""")
add_newdoc("scipy.special", "nbdtri",
"""
nbdtri(k, n, y)
Inverse of nbdtr vs p
Finds the argument p such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "nbdtrik",
"""
nbdtrik(y,n,p)
Inverse of nbdtr vs k
Finds the argument k such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "nbdtrin",
"""
nbdtrin(k,y,p)
Inverse of nbdtr vs n
Finds the argument n such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "ncfdtr",
"""
ncfdtr(dfn, dfd, nc, f)
Cumulative distribution function of the non-central F distribution.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
ncfdtri : Inverse CDF (iCDF) of the non-central F distribution.
ncfdtridfd : Calculate dfd, given CDF and iCDF values.
ncfdtridfn : Calculate dfn, given CDF and iCDF values.
ncfdtrinc : Calculate noncentrality parameter, given CDF, iCDF, dfn, dfd.
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "ncfdtri",
"""
ncfdtri(p, dfn, dfd, nc)
Inverse cumulative distribution function of the non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfd",
"""
ncfdtridfd(p, f, dfn, nc)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfn",
"""
ncfdtridfn(p, f, dfd, nc)
Calculate degrees of freedom (numerator) for the noncentral F-distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtrinc",
"""
ncfdtrinc(p, f, dfn, dfd)
Calculate non-centrality parameter for non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "nctdtr",
"""
nctdtr(df, nc, t)
Cumulative distribution function of the non-central t distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "nctdtridf",
"""
nctdtridf(p, nc, t)
Calculate degrees of freedom for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrinc",
"""
nctdtrinc(df, p, t)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrit",
"""
nctdtrit(df, nc, p)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
""")
add_newdoc("scipy.special", "ndtr",
"""
ndtr(x)
Gaussian cumulative distribution function
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to x::
1/sqrt(2*pi) * integral(exp(-t**2 / 2),t=-inf..x)
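Examples
--------
A quick sanity check of the definition above: the standard normal
density is symmetric about zero, so the CDF at zero is one half.
>>> from scipy import special
>>> special.ndtr(0)
0.5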
""")
add_newdoc("scipy.special", "nrdtrimn",
"""
nrdtrimn(p, x, std)
Calculate mean of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
std : array_like
Standard deviation.
Returns
-------
mn : float or ndarray
The mean of the normal distribution.
See Also
--------
nrdtrisd, ndtr
""")
add_newdoc("scipy.special", "nrdtrisd",
"""
nrdtrisd(p, x, mn)
Calculate standard deviation of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
mn : float or ndarray
The mean of the normal distribution.
Returns
-------
std : array_like
Standard deviation.
See Also
--------
nrdtrimn, ndtr
""")
add_newdoc("scipy.special", "log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to x::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
""")
add_newdoc("scipy.special", "ndtri",
"""
ndtri(y)
Inverse of ndtr vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to x)
is equal to y.
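Examples
--------
A minimal check of the inverse relationship described above: the median
of the standard normal is zero, so the inverse CDF at 0.5 should return
zero.
>>> from scipy import special
>>> special.ndtri(0.5)
0.0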
""")
add_newdoc("scipy.special", "obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order m,n (n>=m) and spheroidal parameter c.
""")
add_newdoc("scipy.special", "obl_rad1",
"""
obl_rad1(m,n,c,x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad1_cv",
"""
obl_rad1_cv(m,n,c,cv,x)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2",
"""
obl_rad2(m,n,c,x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2_cv",
"""
obl_rad2_cv(m,n,c,cv,x)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d,dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbvv",
"""
pbvv(v,x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbwa",
"""
pbwa(a,x)
Parabolic cylinder function W
Returns the parabolic cylinder function W(a,x) in w and the
derivative, W'(a,x) in wp.
.. warning::
May not be accurate for large (>5) arguments in a and/or x.
Returns
-------
w
Value of the function
wp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pdtr",
"""
pdtr(k, m)
Poisson cumulative distribution function
Returns the sum of the first k terms of the Poisson distribution:
sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m). Arguments
must both be positive and k an integer.
""")
add_newdoc("scipy.special", "pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be positive and k an integer.
""")
add_newdoc("scipy.special", "pdtri",
"""
pdtri(k,y)
Inverse to pdtr vs m
Returns the Poisson variable m such that the sum from 0 to k of
the Poisson density is equal to the given probability y:
calculated by gammaincinv(k+1, y). k must be a nonnegative
integer and y between 0 and 1.
""")
add_newdoc("scipy.special", "pdtrik",
"""
pdtrik(p,m)
Inverse to pdtr vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("scipy.special", "poch",
"""
poch(z, m)
Rising factorial (z)_m
The Pochhammer symbol (rising factorial) is defined as::
(z)_m = gamma(z + m) / gamma(z)
For positive integer `m` it reads::
(z)_m = z * (z + 1) * ... * (z + m - 1)
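Examples
--------
A small tolerance-based check of the product form above, using the
illustrative values ``z = 3`` and ``m = 2`` (so ``(3)_2 = 3 * 4``):
>>> import numpy as np
>>> from scipy import special
>>> np.allclose(special.poch(3, 2), 3 * 4)
True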
""")
add_newdoc("scipy.special", "pro_ang1",
"""
pro_ang1(m,n,c,x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_ang1_cv",
"""
pro_ang1_cv(m,n,c,cv,x)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_cv",
"""
pro_cv(m,n,c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order m,n (n>=m) and spheroidal parameter c.
""")
add_newdoc("scipy.special", "pro_rad1",
"""
pro_rad1(m,n,c,x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad1_cv",
"""
pro_rad1_cv(m,n,c,cv,x)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2",
"""
pro_rad2(m,n,c,x)
Prolate spheroidal radial function of the second kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2_cv",
"""
pro_rad2_cv(m,n,c,cv,x)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pseudo_huber",
r"""
pseudo_huber(delta, r)
Pseudo-Huber loss function.
.. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : ndarray
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Pseudo-Huber loss function values.
Notes
-----
This function is convex in :math:`r`.
.. versionadded:: 0.15.0
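Examples
--------
A minimal check of the formula above: the loss vanishes at ``r = 0``
(the values used here are illustrative only).
>>> from scipy import special
>>> special.pseudo_huber(1.0, 0.0)
0.0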
""")
add_newdoc("scipy.special", "psi",
"""
psi(z)
Digamma function
The derivative of the logarithm of the gamma function evaluated at
z (also called the digamma function).
""")
add_newdoc("scipy.special", "radian",
"""
radian(d, m, s)
Convert from degrees to radians
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
""")
add_newdoc("scipy.special", "rel_entr",
r"""
rel_entr(x, y)
Elementwise function for computing relative entropy.
.. math:: \mathrm{rel\_entr}(x, y) = \begin{cases} x \log(x / y) & x > 0, y > 0 \\ 0 & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, kl_div
Notes
-----
This function is jointly convex in x and y.
.. versionadded:: 0.14.0
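Examples
--------
By the case analysis above, the relative entropy is zero whenever
``x == y > 0`` (a minimal illustrative check):
>>> from scipy import special
>>> special.rel_entr(1.0, 1.0)
0.0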
""")
add_newdoc("scipy.special", "rgamma",
"""
rgamma(z)
Gamma function inverted
Returns ``1/gamma(z)``
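Examples
--------
A tolerance-based consistency check against `gamma`, using the
illustrative argument ``z = 4``:
>>> import numpy as np
>>> from scipy import special
>>> np.allclose(special.rgamma(4), 1 / special.gamma(4))
True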
""")
add_newdoc("scipy.special", "round",
"""
round(x)
Round to nearest integer
Returns the nearest integer to x as a double precision floating
point result. If x ends in 0.5 exactly, the nearest even integer
is chosen.
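Examples
--------
Ties at .5 are resolved towards the nearest even integer, as described
above:
>>> from scipy import special
>>> special.round(2.5)
2.0
>>> special.round(3.5)
4.0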
""")
add_newdoc("scipy.special", "shichi",
"""
shichi(x)
Hyperbolic sine and cosine integrals
Returns
-------
shi
``integral(sinh(t)/t, t=0..x)``
chi
``eul + ln x + integral((cosh(t)-1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sici",
"""
sici(x)
Sine and cosine integrals
Returns
-------
si
``integral(sin(t)/t, t=0..x)``
ci
``eul + ln x + integral((cos(t) - 1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sindg",
"""
sindg(x)
Sine of angle given in degrees
""")
add_newdoc("scipy.special", "smirnov",
"""
smirnov(n, e)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (Dn+ or Dn-) for a one-sided test of
equality between an empirical and a theoretical distribution. It
is equal to the probability that the maximum difference between a
theoretical distribution and an empirical one based on n samples
is greater than e.
""")
add_newdoc("scipy.special", "smirnovi",
"""
smirnovi(n, y)
Inverse to smirnov
Returns ``e`` such that ``smirnov(n, e) = y``.
""")
add_newdoc("scipy.special", "spence",
"""
spence(x)
Dilogarithm integral
Returns the dilogarithm integral::
-integral(log t / (t-1),t=1..x)
""")
add_newdoc("scipy.special", "stdtr",
"""
stdtr(df,t)
Student t distribution cumulative distribution function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
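Examples
--------
The integrand above is symmetric in ``x``, so the CDF evaluated at
``t = 0`` should be one half for any ``df > 0`` (a tolerance-based check
with the illustrative value ``df = 3``):
>>> import numpy as np
>>> from scipy import special
>>> np.allclose(special.stdtr(3, 0), 0.5)
True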
""")
add_newdoc("scipy.special", "stdtridf",
"""
stdtridf(p,t)
Inverse of stdtr vs df
Returns the argument df such that stdtr(df,t) is equal to p.
""")
add_newdoc("scipy.special", "stdtrit",
"""
stdtrit(df,p)
Inverse of stdtr vs t
Returns the argument t such that stdtr(df,t) is equal to p.
""")
add_newdoc("scipy.special", "struve",
"""
struve(v,x)
Struve function
Computes the Struve function Hv(x) of order v at x; x must be
positive unless v is an integer.
""")
add_newdoc("scipy.special", "tandg",
"""
tandg(x)
Tangent of angle x given in degrees.
""")
add_newdoc("scipy.special", "tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("scipy.special", "wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2)*erfc(-i*z)
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if `x = 0`.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
Notes
-----
.. versionadded:: 0.13.0
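Examples
--------
A small check of both cases described above: the ``x = 0`` shortcut and
the ordinary product.
>>> import numpy as np
>>> from scipy import special
>>> special.xlogy(0, 0)
0.0
>>> np.allclose(special.xlogy(2, 3), 2 * np.log(3))
True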
""")
add_newdoc("scipy.special", "xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if `x = 0`.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "y0",
"""
y0(x)
Bessel function of the second kind of order 0
Returns the Bessel function of the second kind of order 0 at x.
""")
add_newdoc("scipy.special", "y1",
"""
y1(x)
Bessel function of the second kind of order 1
Returns the Bessel function of the second kind of order 1 at x.
""")
add_newdoc("scipy.special", "yn",
"""
yn(n,x)
Bessel function of the second kind of integer order
Returns the Bessel function of the second kind of integer order n
at x.
""")
add_newdoc("scipy.special", "yv",
"""
yv(v,z)
Bessel function of the second kind of real order
Returns the Bessel function of the second kind of real order v at
complex z.
""")
add_newdoc("scipy.special", "yve",
"""
yve(v,z)
Exponentially scaled Bessel function of the second kind of real order
Returns the exponentially scaled Bessel function of the second
kind of real order v at complex z::
yve(v,z) = yv(v,z) * exp(-abs(z.imag))
""")
add_newdoc("scipy.special", "zeta",
"""
zeta(x, q)
Hurwitz zeta function
The Riemann zeta function of two arguments (also known as the
Hurwitz zeta function).
This function is defined as
.. math:: \\zeta(x, q) = \\sum_{k=0}^{\\infty} 1 / (k+q)^x,
where ``x > 1`` and ``q > 0``.
See Also
--------
zetac
""")
add_newdoc("scipy.special", "zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``.
See Also
--------
zeta
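Examples
--------
A tolerance-based check against the classical value
``zeta(2) = pi**2 / 6``:
>>> import numpy as np
>>> from scipy import special
>>> np.allclose(special.zetac(2), np.pi**2 / 6 - 1)
True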
""")
add_newdoc("scipy.special", "_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using Bessel function series
Returns
-------
v, err
""")
|
piyush0609/scipy
|
scipy/special/add_newdocs.py
|
Python
|
bsd-3-clause
| 70,838
|
[
"Gaussian"
] |
e3fb6accd95a61c1b8b39115ee22498b9ba9abd621c8d37676b0b310ba28eded
|
#! PsiAPI energy example
import psi4
psi4.set_output_file("output.dat", False)
geom = psi4.geometry("""
C # testing escaping comments
""")
psi4.set_options({"SCF_TYPE": "DF",
"BASIS": "cc-pVDZ"})
scf_e, scf_wfn = psi4.energy('SCF', return_wfn=True)
psi4.compare_values(-37.5959861862713893, scf_e, 6, 'SCF DF Energy')
psi4.core.set_local_option("SCF", "SCF_TYPE", "PK")
psi4.compare_values(-37.5959861862713893, scf_e, 6, 'SCF PK Energy')
|
ashutoshvt/psi4
|
tests/python/energy/input.py
|
Python
|
lgpl-3.0
| 466
|
[
"Psi4"
] |
f538932079fdeb1a518678f9a8d6e19569b21471bb618e3f4b18e3cf3c74aaf1
|
# -*- coding: utf-8 -*-
#
# Cherokee-admin
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
# Taher Shihadeh <taher@octality.com>
#
# Copyright (C) 2001-2011 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import CTK
import Page
import Cherokee
import xmlrpclib
import XMLServerDigest
import PageError
import SystemStats
import SystemStatsWidgets
import About
import os
import time
import urllib
import urllib2
import re
import OWS_Login
import OWS_Backup
import OWS_Market_Info
import OWS_Cherokee_Info
from util import *
from consts import *
from configured import *
# URLs
LINK_OCTALITY = 'http://www.octality.com/'
LINK_SUPPORT = 'http://www.octality.com/engineering.html'
OWS_PROUD = 'http://www.octality.com/api/v%s/open/proud/' %(OWS_API_VERSION)
PROUD_USERS_WEB = "http://www.cherokee-project.com/cherokee-domain-list.html"
# Links
LINK_BUGTRACKER = 'http://bugs.cherokee-project.com/'
LINK_TWITTER = 'http://twitter.com/webserver'
LINK_FACEBOOK = 'http://www.facebook.com/cherokee.project'
LINK_DOWNLOAD = 'http://www.cherokee-project.com/download/'
LINK_LISTS = 'http://lists.octality.com/'
LINK_LIST = 'http://lists.octality.com/listinfo/cherokee'
LINK_IRC = 'irc://irc.freenode.net/cherokee'
LINK_HELP = '/help/basics.html'
LINK_CHEROKEE = 'http://www.cherokee-project.com/'
LINK_LINKEDIN = 'http://www.linkedin.com/groups?gid=1819726'
# Subscription
SUBSCRIBE_URL = 'http://lists.octality.com/subscribe/cherokee-dev'
SUBSCRIBE_CHECK = 'Your subscription request has been received'
SUBSCRIBE_APPLY = '/index/subscribe/apply'
NOTE_EMAIL = N_("You will be sent an email requesting confirmation")
NOTE_NAME = N_("Optionally provide your name")
MAILING_LIST_INFO = N_("""\
There are a number of Community <a href="%(link)s" target="_blank">Mailing Lists</a>
available for you to subscribe. You can subscribe the General Discussion
mailing list from this interface. There is where most of the discussions
take place.""")%({'link':LINK_LISTS})
# Server is..
RUNNING_NOTICE = N_('Server is Running')
STOPPED_NOTICE = N_('Server is not Running')
PROUD_USERS_NOTICE = N_("""\
We would love to know that you are using Cherokee. Submit your domain
name and it will be listed on the Cherokee Project web site.
""")
# Dialogs
PROUD_DIALOG_OK = N_("The information has been successfully sent. Thank you!")
PROUS_DIALOG_ERROR1 = N_("Unfortunately something went wrong, and the information could not be submitted:")
PROUS_DIALOG_ERROR2 = N_("Please, try again. Do not hesitate to report the problem if it persists.")
#
REMOTE_SERVS_APPLY = '/remote-servs/apply'
REMOTE_SERVS_ENABLE = N_('Enable Remote Services')
# Help entries
HELPS = [('config_status', N_("Status"))]
# JS
JS_SUBSCRIBE = """
$('#subscribe-a').click (function(){ %s });
"""
JS_PROUD = """
$('#proud-a').click (function(){ %s });
"""
JS_SCROLL = """
function resize_cherokee_containers() {
$('#home-container').height($(window).height() - 106);
}
$(document).ready(function() {
resize_cherokee_containers();
$(window).resize(function(){
resize_cherokee_containers();
});
});
"""
def Halt():
# This function halts Cherokee-*admin*
Cherokee.admin.halt()
return {'ret': 'ok'}
def Launch():
if not Cherokee.server.is_alive():
error = Cherokee.server.launch()
if error:
page_error = PageError.PageErrorLaunch (error)
return page_error.Render()
return CTK.HTTP_Redir('/')
def Stop():
Cherokee.pid.refresh()
Cherokee.server.stop()
return CTK.HTTP_Redir('/')
class ServerInfo (CTK.Box):
def __init__ (self):
CTK.Box.__init__ (self, {'id': 'server-section', 'class': 'infosection'})
infotable = CTK.Table({'class': 'info-table'})
infotable.set_header (column=True, num=1)
is_alive = Cherokee.server.is_alive()
entry = lambda title, string: [CTK.RawHTML (title), CTK.RawHTML(str(string))]
if is_alive:
button = CTK.Button(_('Stop Server'), {'id': 'launch-button', 'class': 'button-stop'})
button.bind ('click', CTK.JS.GotoURL('/stop'))
infotable += [CTK.RawHTML(_(RUNNING_NOTICE)), button]
else:
button = CTK.Button(_('Start Server'), {'id': 'launch-button', 'class': 'button-start'})
button.bind ('click', CTK.JS.GotoURL('/launch'))
infotable += [CTK.RawHTML(_(STOPPED_NOTICE)), button]
sys_stats = SystemStats.get_system_stats()
infotable += entry(_('Hostname'), sys_stats.hostname)
if CTK.cfg.file:
cfg_file = '<span title="%s: %s">%s</span>' % (_('Modified'), self._get_cfg_ctime(), CTK.cfg.file)
else:
cfg_file = _('Not found')
infotable += entry(_("Config File"), cfg_file)
box = CTK.Box()
box += infotable
table = CTK.Table()
table.set_header (column=True, num=1)
table += [CTK.RawHTML (_('Server Information')), box]
self += table
def _get_cfg_ctime (self):
info = os.stat(CTK.cfg.file)
return time.ctime(info.st_ctime)
def RemoteServices_Apply():
enabled = CTK.post.get_val('admin!ows!enabled')
CTK.cfg['admin!ows!enabled'] = enabled
return CTK.cfg_reply_ajax_ok()
class RemoteServices (CTK.Box):
def __init__ (self):
CTK.Box.__init__ (self, {'id': 'remote-services-section', 'class': 'infosection'})
submit = CTK.Submitter (REMOTE_SERVS_APPLY)
submit += CTK.CheckCfgText ("admin!ows!enabled", True, _(REMOTE_SERVS_ENABLE))
submit.bind ('submit_success', CTK.JS.GotoURL('/'))
infotable = CTK.Table({'class': 'info-table'})
infotable.set_header (column=True, num=1)
if int (CTK.cfg.get_val("admin!ows!enabled", OWS_ENABLE)):
if OWS_Login.is_logged():
infotable += [submit, OWS_Login.LoggedAs_Text()]
else:
dialog = OWS_Login.LoginDialog()
dialog.bind ('submit_success', CTK.JS.GotoURL('/'))
link = CTK.Link ("#", CTK.RawHTML('<span>%s</span>' %(_('Sign in'))))
link.bind ('click', dialog.JS_to_show())
cont = CTK.Container()
cont += dialog
cont += link
infotable += [submit, cont]
else:
infotable += [submit]
table = CTK.Table()
table.set_header (column=True, num=1)
table += [CTK.RawHTML (_('Remote Services')), infotable]
self += table
class BackupService (CTK.Box):
def __init__ (self):
CTK.Box.__init__ (self, {'id': 'remote-backup-section', 'class': 'infosection'})
cont = CTK.Container()
cont += OWS_Backup.Restore_Config_Button()
cont += OWS_Backup.Save_Config_Button()
table = CTK.Table()
table.set_header (column=True, num=1)
table += [CTK.RawHTML (_('Backup Service')), cont]
self += table
class CPUInfo (CTK.Box):
def __init__ (self):
CTK.Box.__init__ (self, {'id': 'cpu-section', 'class': 'infosection'})
table = CTK.Table()
table.set_header (column=True, num=1)
table += [CTK.RawHTML (_('Processors')), SystemStatsWidgets.CPU_Info()]
table += [CTK.RawHTML (''), SystemStatsWidgets.CPU_Meter()]
self += table
class MemoryInfo (CTK.Box):
def __init__ (self):
CTK.Box.__init__ (self, {'id': 'ram-section', 'class': 'infosection'})
meter = SystemStatsWidgets.Memory_Meter()
table = CTK.Table()
table.set_header (column=True, num=1)
table += [CTK.RawHTML (_('Memory')), meter]
self += table
def language_set (langs):
languages = [langs]
try:
CTK.i18n.install_translation('cherokee', LOCALEDIR, languages)
except:
CTK.util.print_exception()
return True
def Lang_Apply():
# Sanity check
lang = CTK.post.get_val('lang')
if not lang:
return {'ret': 'error', 'errors': {'lang': 'Cannot be empty'}}
language_set (lang)
CTK.cfg['admin!lang'] = lang
OWS_Market_Info.invalidate_caches()
return {'ret': 'ok', 'redirect': '/'}
class LanguageSelector (CTK.Box):
def __init__ (self):
CTK.Box.__init__ (self, {'id': 'language-selector'})
languages = [('', _('Choose'))] + trans_options(AVAILABLE_LANGUAGES)
submit = CTK.Submitter('/lang/apply')
submit.id = 'language-list'
# TODO: Maybe it's better to show the selected lang and omit the 'Language' label.
submit += CTK.Combobox ({'name': 'lang'}, languages)
self += CTK.RawHTML('%s: ' %(_('Language')))
self += submit
def ProudUsers_Apply():
# Collect domains
domains = []
for v in CTK.cfg.keys('vserver'):
domains.append (CTK.cfg.get_val('vserver!%s!nick'%(v)))
for d in CTK.cfg.keys('vserver!%s!match!domain'%(v)):
domains.append (CTK.cfg.get_val('vserver!%s!match!domain!%s'%(v, d)))
for d in CTK.cfg.keys('vserver!%s!match!regex'%(v)):
domains.append (CTK.cfg.get_val('vserver!%s!match!regex!%s'%(v, d)))
# Send the list
try:
xmlrpc = XMLServerDigest.XmlRpcServer (OWS_PROUD)
xmlrpc.add_domains_to_review(domains)
except xmlrpclib.ProtocolError, err:
details = "Error code: %d\n" % err.errcode
details += "Error message: %s\n" % err.errmsg
details += "Headers: %s\n" % err.headers
return '<p>%s</p>' %(_(PROUS_DIALOG_ERROR1)) + \
'<p><pre>%s</pre></p>' %(CTK.escape_html(details)) + \
'<p>%s</p>' %(_(PROUS_DIALOG_ERROR2))
except Exception, e:
return '<p>%s</p>' %(_(PROUS_DIALOG_ERROR1)) + \
'<p><pre>%s\n</pre></p>' %(CTK.escape_html(str(e))) + \
'<p>%s</p>' %(_(PROUS_DIALOG_ERROR2))
return "<p>%s</p>" %(_(PROUD_DIALOG_OK))
class ProudUsers (CTK.Box):
def __init__ (self):
CTK.Box.__init__ (self, {'id': 'proud-users', 'class': 'sidebar-box'})
# Dialog
dialog = CTK.DialogProxyLazy ('/proud/apply', {'title': _('Proud Cherokee User List Submission'), 'width': 480})
dialog.AddButton (_('Close'), "close")
self += CTK.RawHTML('<h2>%s</h2>' %(_('Proud Cherokee Users')))
self += CTK.Box ({'id': 'proud-notice'}, CTK.RawHTML (_(PROUD_USERS_NOTICE)))
self += CTK.Box ({'id': 'proud-link'}, CTK.RawHTML ('<a target="_blank" href="%s">%s</a> | <a id="proud-a">%s</a>' %(_(PROUD_USERS_WEB), _('View list…'), _('Send your domains'))))
self += CTK.RawHTML (js=JS_PROUD %(dialog.JS_to_show()))
self += dialog
def Subscribe_Apply ():
values = {}
for k in CTK.post:
values[k] = CTK.post[k]
data = urllib.urlencode(values)
req = urllib2.Request(SUBSCRIBE_URL, data)
response = urllib2.urlopen(req)
results_page = response.read()
if SUBSCRIBE_CHECK in results_page:
return {'ret':'ok'}
return {'ret':'error'}
class MailingListDialog (CTK.Dialog):
def __init__ (self):
CTK.Dialog.__init__ (self, {'title': _('Mailing List Subscription'), 'width': 560})
self.AddButton (_('Cancel'), "close")
self.AddButton (_('Subscribe'), self.JS_to_trigger('submit'))
table = CTK.PropsTable()
table.Add (_('Your email address'), CTK.TextField({'name': 'email', 'class': 'noauto'}), _(NOTE_EMAIL))
table.Add (_('Your name'), CTK.TextField({'name': 'fullname', 'class': 'noauto', 'optional':True}), _(NOTE_NAME))
submit = CTK.Submitter (SUBSCRIBE_APPLY)
submit.bind ('submit_success', self.JS_to_close())
submit += table
self += CTK.RawHTML ("<p>%s</p>" %(_(MAILING_LIST_INFO)))
self += submit
class SupportBox (CTK.Box):
def __init__ (self):
CTK.Box.__init__ (self, {'id': 'support-box', 'class': 'sidebar-box'})
qlist = CTK.List ()
self += CTK.RawHTML('<h2>%s</h2>' % _('Support'))
self += qlist
# Help
link = CTK.LinkWindow (LINK_HELP, CTK.RawHTML (_('Getting started')))
qlist += link
# Mailing List
link = CTK.Link ('#', CTK.RawHTML (_('Subscribe to mailing lists')))
dialog = MailingListDialog()
link.bind ('click', dialog.JS_to_show())
self += dialog
qlist += link
# Bug report
link = CTK.LinkWindow (LINK_BUGTRACKER, CTK.RawHTML (_('Report a bug')))
qlist += link
# Commercial Support
link = CTK.LinkWindow (LINK_SUPPORT, CTK.RawHTML (_('Purchase commercial support')))
qlist += link
# About..
dialog = CTK.DialogProxyLazy (About.URL_ABOUT_CONTENT, {'title': _("About Cherokee"), 'width': 600})
dialog.AddButton (_('Close'), 'close')
self += dialog
link = CTK.Link ('#', CTK.RawHTML (_('About Cherokee')))
link.bind ('click', dialog.JS_to_show())
qlist += link
class CommunityBar (CTK.Box):
def __init__ (self):
CTK.Box.__init__ (self, {'id': 'community-bar'})
slist = CTK.List ()
self += CTK.RawHTML('<h3>%s</h3>' % _('Join the Cherokee Community:'))
self += slist
slist += CTK.LinkWindow (LINK_CHEROKEE, CTK.Image({'src': '/static/images/other/web.png', 'title': _('Visit the Cherokee Project Website')}))
slist += CTK.LinkWindow (LINK_TWITTER, CTK.Image({'src': '/static/images/other/twitter.png', 'title': _('Follow us on Twitter')}))
slist += CTK.LinkWindow (LINK_FACEBOOK, CTK.Image({'src': '/static/images/other/facebook.png', 'title': _('Join us on Facebook')}))
slist += CTK.LinkWindow (LINK_LINKEDIN, CTK.Image({'src': '/static/images/other/linkedin.png', 'title': _('Become a member of Cherokee group on LinkedIn')}))
slist += CTK.LinkWindow (LINK_IRC, CTK.Image({'src': '/static/images/other/irc.png', 'title': _('Chat with us at irc.freenode.net')}))
class HaltAdmin (CTK.Box):
def __init__ (self):
CTK.Box.__init__ (self, {'id': 'halt-admin-box'})
submit = CTK.Submitter('/halt')
submit += CTK.Hidden ('what', 'ever')
dialog = CTK.Dialog ({'title': _('Shutdown Cherokee-admin'), 'width': 560})
dialog.AddButton (_('Cancel'), "close")
dialog.AddButton (_('Shut down'), dialog.JS_to_trigger('submit'))
dialog += CTK.RawHTML ("<h2>%s</h2>" %(_('You are about to shut down this instance of Cherokee-admin')))
dialog += CTK.RawHTML ("<p>%s</p>" %(_('Are you sure you want to proceed?')))
dialog += submit
dialog.bind ('submit', dialog.JS_to_close())
dialog.bind ('submit', "$('body').html('<h1>%s</h1>');"%(_('Cherokee-admin has been shut down')))
link = CTK.Link (None, CTK.RawHTML (_('Shut down Cherokee-Admin')))
link.bind ('click', dialog.JS_to_show())
self += link
self += dialog
class Render:
def __call__ (self):
Cherokee.pid.refresh()
# Top
top = CTK.Box({'id': 'top-box'})
top += CTK.RawHTML ("<h1>%s</h1>"% _('Welcome to Cherokee Admin'))
top += LanguageSelector()
# Content: Left
mainarea = CTK.Box({'id': 'main-area'})
mainarea += OWS_Market_Info.Index_Block1()
mainarea += ServerInfo()
if int(OWS_ENABLE):
mainarea += RemoteServices()
if OWS_Login.is_logged() and \
int (CTK.cfg.get_val("admin!ows!enabled", OWS_ENABLE)):
mainarea += BackupService()
mainarea += CPUInfo()
mainarea += MemoryInfo()
mainarea += CommunityBar()
# Content: Right
sidebar = CTK.Box({'id': 'sidebar'})
sidebar += SupportBox()
if int (CTK.cfg.get_val("admin!ows!enabled", OWS_ENABLE)):
sidebar += OWS_Cherokee_Info.Latest_Release()
sidebar += ProudUsers()
sidebar += OWS_Market_Info.Index_Block2()
sidebar += HaltAdmin()
# Content
cont = CTK.Box({'id': 'home-container'})
cont += mainarea
cont += sidebar
# Page
page = Page.Base(_('Welcome to Cherokee Admin'), body_id='index', helps=HELPS)
page += top
page += cont
page += CTK.RawHTML (js=JS_SCROLL)
return page.Render()
CTK.publish (r'^/$', Render)
CTK.publish (r'^/launch$', Launch)
CTK.publish (r'^/stop$', Stop)
CTK.publish (r'^/halt$', Halt)
CTK.publish (r'^/lang/apply$', Lang_Apply, method="POST")
CTK.publish (r'^/proud/apply$', ProudUsers_Apply, method="POST")
CTK.publish (r'^%s$'%(SUBSCRIBE_APPLY), Subscribe_Apply, method="POST")
CTK.publish (r'^%s$'%(REMOTE_SERVS_APPLY), RemoteServices_Apply, method="POST")
|
nuxleus/cherokee-webserver
|
admin/PageIndex.py
|
Python
|
gpl-2.0
| 17,513
|
[
"VisIt"
] |
62b952edd23daf411bf52db12966fc6199c4f9bbfe500d63912f998ba9f40268
|
# Orca
#
# Copyright 2010-2011 Consorcio Fernando de los Rios.
# Author: Juanje Ojeda Croissier <jojeda@emergya.es>
# Author: Javier Hernandez Antunez <jhernandez@emergya.es>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""JSON backend for Orca settings"""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010-2011 Consorcio Fernando de los Rios."
__license__ = "LGPL"
from json import load, dump
import os
from orca import settings, acss
class Backend:
def __init__(self, prefsDir):
""" Initialize the JSON Backend.
"""
self.general = {}
self.pronunciations = {}
self.keybindings = {}
self.profiles = {}
self.settingsFile = os.path.join(prefsDir, "user-settings.conf")
self.appPrefsDir = os.path.join(prefsDir, "app-settings")
self._defaultProfiles = {'default': { 'profile': settings.profile,
'pronunciations': {},
'keybindings': {}
}
}
def saveDefaultSettings(self, general, pronunciations, keybindings):
""" Save default settings for all the properties from
orca.settings. """
prefs = {'general': general,
'profiles': self._defaultProfiles,
'pronunciations': pronunciations,
'keybindings': keybindings}
self.general = general
self.profiles = self._defaultProfiles
self.pronunciations = pronunciations
self.keybindings = keybindings
settingsFile = open(self.settingsFile, 'w')
dump(prefs, settingsFile, indent=4)
settingsFile.close()
def getAppSettings(self, appName):
fileName = os.path.join(self.appPrefsDir, "%s.conf" % appName)
if os.path.exists(fileName):
settingsFile = open(fileName, 'r')
prefs = load(settingsFile)
settingsFile.close()
else:
prefs = {}
return prefs
def saveAppSettings(self, appName, profile, general, pronunciations, keybindings):
prefs = self.getAppSettings(appName)
profiles = prefs.get('profiles', {})
profiles[profile] = {'general': general,
'pronunciations': pronunciations,
'keybindings': keybindings}
prefs['profiles'] = profiles
fileName = os.path.join(self.appPrefsDir, "%s.conf" % appName)
settingsFile = open(fileName, 'w')
dump(prefs, settingsFile, indent=4)
settingsFile.close()
def saveProfileSettings(self, profile, general,
pronunciations, keybindings):
""" Save minimal subset defined in the profile against current
defaults. """
if profile is None:
profile = 'default'
general['pronunciations'] = pronunciations
general['keybindings'] = keybindings
with open(self.settingsFile, 'r+') as settingsFile:
prefs = load(settingsFile)
prefs['profiles'][profile] = general
settingsFile.seek(0)
settingsFile.truncate()
dump(prefs, settingsFile, indent=4)
def _getSettings(self):
""" Load from config file all settings """
settingsFile = open(self.settingsFile)
try:
prefs = load(settingsFile)
except ValueError:
return
self.general = prefs['general'].copy()
self.pronunciations = prefs['pronunciations']
self.keybindings = prefs['keybindings']
self.profiles = prefs['profiles'].copy()
def getGeneral(self, profile=None):
""" Get general settings from default settings and
override with profile values. """
self._getSettings()
generalSettings = self.general.copy()
defaultProfile = generalSettings.get('startingProfile',
['Default', 'default'])
if profile is None:
profile = defaultProfile[1]
profileSettings = self.profiles[profile].copy()
for key, value in profileSettings.items():
if key == 'voices':
for voiceType, voiceDef in value.items():
value[voiceType] = acss.ACSS(voiceDef)
if key not in ['startingProfile', 'activeProfile']:
generalSettings[key] = value
try:
generalSettings['activeProfile'] = profileSettings['profile']
except KeyError:
generalSettings['activeProfile'] = defaultProfile
return generalSettings
def getPronunciations(self, profile='default'):
""" Get pronunciation settings from default settings and
override with profile values. """
self._getSettings()
pronunciations = self.pronunciations.copy()
profileSettings = self.profiles[profile].copy()
if 'pronunciations' in profileSettings:
pronunciations = profileSettings['pronunciations']
return pronunciations
def getKeybindings(self, profile='default'):
""" Get keybindings settings from default settings and
override with profile values. """
self._getSettings()
keybindings = self.keybindings.copy()
profileSettings = self.profiles[profile].copy()
if 'keybindings' in profileSettings:
keybindings = profileSettings['keybindings']
return keybindings
def isFirstStart(self):
""" Check if we're in first start. """
return not os.path.exists(self.settingsFile)
def _setProfileKey(self, key, value):
self.general[key] = value
with open(self.settingsFile, 'r+') as settingsFile:
prefs = load(settingsFile)
prefs['general'][key] = value
settingsFile.seek(0)
settingsFile.truncate()
dump(prefs, settingsFile, indent=4)
def setFirstStart(self, value=False):
"""Set firstStart. This user-configurable setting is primarily
intended to serve as an indication as to whether or not initial
configuration is needed."""
self.general['firstStart'] = value
self._setProfileKey('firstStart', value)
def availableProfiles(self):
""" List available profiles. """
self._getSettings()
profiles = []
for profileName in self.profiles.keys():
profileDict = self.profiles[profileName].copy()
profiles.append(profileDict.get('profile'))
return profiles
def removeProfile(self, profile):
"""Remove an existing profile"""
def removeProfileFrom(dict):
del dict[profile]
# if we removed the last profile, restore the default ones
if len(dict) == 0:
for profileName in self._defaultProfiles:
dict[profileName] = self._defaultProfiles[profileName].copy()
if profile in self.profiles:
removeProfileFrom(self.profiles)
with open(self.settingsFile, 'r+') as settingsFile:
prefs = load(settingsFile)
if profile in prefs['profiles']:
removeProfileFrom(prefs['profiles'])
settingsFile.seek(0)
settingsFile.truncate()
dump(prefs, settingsFile, indent=4)
|
GNOME/orca
|
src/orca/backends/json_backend.py
|
Python
|
lgpl-2.1
| 8,205
|
[
"ORCA"
] |
aade825598bc4f5843fa5f43f0428d79339e8c4c9816122849e72fe197f020c1
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
from collections.abc import Iterable
import json
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests import Request, PreparedRequest
from requests.sessions import Session
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.ssl_certificates import SslCertificatesClient
from google.cloud.compute_v1.services.ssl_certificates import pagers
from google.cloud.compute_v1.services.ssl_certificates import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert SslCertificatesClient._get_default_mtls_endpoint(None) is None
assert (
SslCertificatesClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
SslCertificatesClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
SslCertificatesClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
SslCertificatesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
SslCertificatesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
)
@pytest.mark.parametrize(
"client_class,transport_name", [(SslCertificatesClient, "rest"),]
)
def test_ssl_certificates_client_from_service_account_info(
client_class, transport_name
):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info, transport=transport_name)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == (
"compute.googleapis.com{}".format(":443")
if transport_name in ["grpc", "grpc_asyncio"]
else "https://{}".format("compute.googleapis.com")
)
@pytest.mark.parametrize(
"transport_class,transport_name",
[(transports.SslCertificatesRestTransport, "rest"),],
)
def test_ssl_certificates_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class,transport_name", [(SslCertificatesClient, "rest"),]
)
def test_ssl_certificates_client_from_service_account_file(
client_class, transport_name
):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file(
"dummy/file/path.json", transport=transport_name
)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json(
"dummy/file/path.json", transport=transport_name
)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == (
"compute.googleapis.com{}".format(":443")
if transport_name in ["grpc", "grpc_asyncio"]
else "https://{}".format("compute.googleapis.com")
)
def test_ssl_certificates_client_get_transport_class():
transport = SslCertificatesClient.get_transport_class()
available_transports = [
transports.SslCertificatesRestTransport,
]
assert transport in available_transports
transport = SslCertificatesClient.get_transport_class("rest")
assert transport == transports.SslCertificatesRestTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(SslCertificatesClient, transports.SslCertificatesRestTransport, "rest"),],
)
@mock.patch.object(
SslCertificatesClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SslCertificatesClient),
)
def test_ssl_certificates_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(SslCertificatesClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(SslCertificatesClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
SslCertificatesClient,
transports.SslCertificatesRestTransport,
"rest",
"true",
),
(
SslCertificatesClient,
transports.SslCertificatesRestTransport,
"rest",
"false",
),
],
)
@mock.patch.object(
SslCertificatesClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SslCertificatesClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_ssl_certificates_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [SslCertificatesClient])
@mock.patch.object(
SslCertificatesClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SslCertificatesClient),
)
def test_ssl_certificates_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(SslCertificatesClient, transports.SslCertificatesRestTransport, "rest"),],
)
def test_ssl_certificates_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[(SslCertificatesClient, transports.SslCertificatesRestTransport, "rest", None),],
)
def test_ssl_certificates_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"request_type", [compute.AggregatedListSslCertificatesRequest, dict,]
)
def test_aggregated_list_rest(request_type):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.SslCertificateAggregatedList(
id="id_value",
kind="kind_value",
next_page_token="next_page_token_value",
self_link="self_link_value",
unreachables=["unreachables_value"],
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.SslCertificateAggregatedList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.aggregated_list(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.AggregatedListPager)
assert response.id == "id_value"
assert response.kind == "kind_value"
assert response.next_page_token == "next_page_token_value"
assert response.self_link == "self_link_value"
assert response.unreachables == ["unreachables_value"]
def test_aggregated_list_rest_required_fields(
request_type=compute.AggregatedListSslCertificatesRequest,
):
transport_class = transports.SslCertificatesRestTransport
request_init = {}
request_init["project"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).aggregated_list._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).aggregated_list._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixed in.
assert not set(unset_fields) - set(
(
"filter",
"include_all_scopes",
"max_results",
"order_by",
"page_token",
"return_partial_success",
)
)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.SslCertificateAggregatedList()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.SslCertificateAggregatedList.to_json(
return_value
)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.aggregated_list(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
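# --- Illustrative sketch (not google.api_core.path_template.transcode) ------------------
# The required-fields tests above mock transcode(); this simplified stand-in shows the
# shape of the result they fake: an HTTP rule is resolved to a uri and method, path
# placeholders are filled from the request, and the remaining fields become query params.
import string as _string

def _fake_transcode(rule, request):
    fields = [f for _, f, _, _ in _string.Formatter().parse(rule["uri"]) if f]
    uri = rule["uri"].format(**{f: request[f] for f in fields})
    query_params = {k: v for k, v in request.items() if k not in fields}
    return {"uri": uri, "method": rule["method"], "query_params": query_params}

# Example: the project lands in the path, max_results stays a query parameter.
_example = _fake_transcode(
    {"method": "get", "uri": "/compute/v1/projects/{project}/aggregated/sslCertificates"},
    {"project": "sample1", "max_results": 10},
)
assert _example["uri"].endswith("/projects/sample1/aggregated/sslCertificates")
assert _example["query_params"] == {"max_results": 10}
# ----------------------------------------------------------------------------------------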
def test_aggregated_list_rest_unset_required_fields():
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.aggregated_list._get_unset_required_fields({})
assert set(unset_fields) == (
set(
(
"filter",
"includeAllScopes",
"maxResults",
"orderBy",
"pageToken",
"returnPartialSuccess",
)
)
& set(("project",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_aggregated_list_rest_interceptors(null_interceptor):
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.SslCertificatesRestInterceptor(),
)
client = SslCertificatesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.SslCertificatesRestInterceptor, "post_aggregated_list"
) as post, mock.patch.object(
transports.SslCertificatesRestInterceptor, "pre_aggregated_list"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.SslCertificateAggregatedList.to_json(
compute.SslCertificateAggregatedList()
)
request = compute.AggregatedListSslCertificatesRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.SslCertificateAggregatedList
client.aggregated_list(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
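# --- Illustrative sketch (hypothetical names, not the generated REST transport) ---------
# The interceptor test above only checks that the "pre" and "post" hooks are each invoked
# once. This toy pipeline shows the contract being exercised: "pre" may rewrite the
# request and metadata before the HTTP call, "post" may rewrite the parsed response.
class _NoOpInterceptor:
    def pre_call(self, request, metadata):
        return request, metadata

    def post_call(self, response):
        return response

def _invoke_with_interceptor(request, metadata, send, interceptor=None):
    interceptor = interceptor or _NoOpInterceptor()
    request, metadata = interceptor.pre_call(request, metadata)
    response = send(request, metadata)
    return interceptor.post_call(response)

# Usage: with a pass-through interceptor the response comes back unchanged.
assert _invoke_with_interceptor(
    {"project": "sample1"}, [("key", "val")], lambda req, md: {"ok": True}
) == {"ok": True}
# ----------------------------------------------------------------------------------------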
def test_aggregated_list_rest_bad_request(
transport: str = "rest", request_type=compute.AggregatedListSslCertificatesRequest
):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.aggregated_list(request)
def test_aggregated_list_rest_flattened():
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.SslCertificateAggregatedList()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1"}
# get truthy value for each flattened field
mock_args = dict(project="project_value",)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.SslCertificateAggregatedList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.aggregated_list(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/aggregated/sslCertificates"
% client.transport._host,
args[1],
)
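# --- Illustrative sketch (not google.api_core.path_template.validate) -------------------
# The flattened-call tests assert that the URL actually sent matches a URI template.
# A rough, regex-based equivalent: each "{field}" placeholder matches one path segment,
# everything else must match literally.
import re as _re

def _matches_template(template: str, url: str) -> bool:
    parts = _re.split(r"(\{[^}]+\})", template)
    pattern = "".join("[^/]+" if p.startswith("{") else _re.escape(p) for p in parts)
    return _re.fullmatch(pattern, url) is not None

assert _matches_template(
    "https://example.test/compute/v1/projects/{project}/aggregated/sslCertificates",
    "https://example.test/compute/v1/projects/sample1/aggregated/sslCertificates",
)
# ----------------------------------------------------------------------------------------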
def test_aggregated_list_rest_flattened_error(transport: str = "rest"):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.aggregated_list(
compute.AggregatedListSslCertificatesRequest(), project="project_value",
)
def test_aggregated_list_rest_pager(transport: str = "rest"):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# TODO(kbandes): remove this mock unless there's a good reason for it.
# with mock.patch.object(path_template, 'transcode') as transcode:
# Set the response as a series of pages
response = (
compute.SslCertificateAggregatedList(
items={
"a": compute.SslCertificatesScopedList(),
"b": compute.SslCertificatesScopedList(),
"c": compute.SslCertificatesScopedList(),
},
next_page_token="abc",
),
compute.SslCertificateAggregatedList(items={}, next_page_token="def",),
compute.SslCertificateAggregatedList(
items={"g": compute.SslCertificatesScopedList(),},
next_page_token="ghi",
),
compute.SslCertificateAggregatedList(
items={
"h": compute.SslCertificatesScopedList(),
"i": compute.SslCertificatesScopedList(),
},
),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(
compute.SslCertificateAggregatedList.to_json(x) for x in response
)
return_values = tuple(Response() for i in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
sample_request = {"project": "sample1"}
pager = client.aggregated_list(request=sample_request)
assert isinstance(pager.get("a"), compute.SslCertificatesScopedList)
assert pager.get("h") is None
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, tuple) for i in results)
for result in results:
assert isinstance(result, tuple)
assert tuple(type(t) for t in result) == (
str,
compute.SslCertificatesScopedList,
)
assert pager.get("a") is None
assert isinstance(pager.get("h"), compute.SslCertificatesScopedList)
pages = list(client.aggregated_list(request=sample_request).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
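# --- Illustrative sketch (not the real pagers.AggregatedListPager) ----------------------
# The pager test above relies on two behaviours: iterating an aggregated-list pager yields
# (scope_name, scoped_list) tuples drawn from each page's "items" map, and .pages walks
# the raw responses. The real pager fetches pages lazily (which is why get() answers
# change as the test iterates); this eager toy version only shows the shapes involved.
class _FakeAggregatedPage:
    def __init__(self, items, next_page_token=""):
        self.items = items
        self.next_page_token = next_page_token

class _FakeAggregatedPager:
    def __init__(self, pages):
        self._pages = list(pages)

    @property
    def pages(self):
        return iter(self._pages)

    def __iter__(self):
        for page in self._pages:
            yield from page.items.items()

    def get(self, key):
        for page in self._pages:
            if key in page.items:
                return page.items[key]
        return None

_pager = _FakeAggregatedPager(
    [_FakeAggregatedPage({"a": 1, "b": 2}, "abc"), _FakeAggregatedPage({"h": 3})]
)
assert [k for k, _ in _pager] == ["a", "b", "h"]
assert _pager.get("h") == 3 and _pager.get("z") is None
# ----------------------------------------------------------------------------------------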
@pytest.mark.parametrize("request_type", [compute.DeleteSslCertificateRequest, dict,])
def test_delete_unary_rest(request_type):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "ssl_certificate": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_delete_unary_rest_required_fields(
request_type=compute.DeleteSslCertificateRequest,
):
transport_class = transports.SslCertificatesRestTransport
request_init = {}
request_init["project"] = ""
request_init["ssl_certificate"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).delete._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["sslCertificate"] = "ssl_certificate_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).delete._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixed in.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "sslCertificate" in jsonified_request
assert jsonified_request["sslCertificate"] == "ssl_certificate_value"
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "delete",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_delete_unary_rest_unset_required_fields():
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.delete._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",)) & set(("project", "sslCertificate",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_delete_unary_rest_interceptors(null_interceptor):
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.SslCertificatesRestInterceptor(),
)
client = SslCertificatesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.SslCertificatesRestInterceptor, "post_delete"
) as post, mock.patch.object(
transports.SslCertificatesRestInterceptor, "pre_delete"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.DeleteSslCertificateRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Operation
client.delete_unary(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_delete_unary_rest_bad_request(
transport: str = "rest", request_type=compute.DeleteSslCertificateRequest
):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "ssl_certificate": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.delete_unary(request)
def test_delete_unary_rest_flattened():
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "ssl_certificate": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value", ssl_certificate="ssl_certificate_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.delete_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/global/sslCertificates/{ssl_certificate}"
% client.transport._host,
args[1],
)
def test_delete_unary_rest_flattened_error(transport: str = "rest"):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_unary(
compute.DeleteSslCertificateRequest(),
project="project_value",
ssl_certificate="ssl_certificate_value",
)
def test_delete_unary_rest_error():
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.GetSslCertificateRequest, dict,])
def test_get_rest(request_type):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "ssl_certificate": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.SslCertificate(
certificate="certificate_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
expire_time="expire_time_value",
id=205,
kind="kind_value",
name="name_value",
private_key="private_key_value",
region="region_value",
self_link="self_link_value",
subject_alternative_names=["subject_alternative_names_value"],
type_="type__value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.SslCertificate.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.SslCertificate)
assert response.certificate == "certificate_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.expire_time == "expire_time_value"
assert response.id == 205
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.private_key == "private_key_value"
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.subject_alternative_names == ["subject_alternative_names_value"]
assert response.type_ == "type__value"
def test_get_rest_required_fields(request_type=compute.GetSslCertificateRequest):
transport_class = transports.SslCertificatesRestTransport
request_init = {}
request_init["project"] = ""
request_init["ssl_certificate"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).get._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["sslCertificate"] = "ssl_certificate_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).get._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "sslCertificate" in jsonified_request
assert jsonified_request["sslCertificate"] == "ssl_certificate_value"
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.SslCertificate()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.SslCertificate.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_get_rest_unset_required_fields():
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.get._get_unset_required_fields({})
assert set(unset_fields) == (set(()) & set(("project", "sslCertificate",)))
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_get_rest_interceptors(null_interceptor):
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.SslCertificatesRestInterceptor(),
)
client = SslCertificatesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.SslCertificatesRestInterceptor, "post_get"
) as post, mock.patch.object(
transports.SslCertificatesRestInterceptor, "pre_get"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.SslCertificate.to_json(
compute.SslCertificate()
)
request = compute.GetSslCertificateRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.SslCertificate
client.get(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
pre.assert_called_once()
post.assert_called_once()
def test_get_rest_bad_request(
transport: str = "rest", request_type=compute.GetSslCertificateRequest
):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "ssl_certificate": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.get(request)
def test_get_rest_flattened():
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.SslCertificate()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "ssl_certificate": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value", ssl_certificate="ssl_certificate_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.SslCertificate.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.get(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/global/sslCertificates/{ssl_certificate}"
% client.transport._host,
args[1],
)
def test_get_rest_flattened_error(transport: str = "rest"):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get(
compute.GetSslCertificateRequest(),
project="project_value",
ssl_certificate="ssl_certificate_value",
)
def test_get_rest_error():
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.InsertSslCertificateRequest, dict,])
def test_insert_unary_rest(request_type):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request_init["ssl_certificate_resource"] = {
"certificate": "certificate_value",
"creation_timestamp": "creation_timestamp_value",
"description": "description_value",
"expire_time": "expire_time_value",
"id": 205,
"kind": "kind_value",
"managed": {
"domain_status": {},
"domains": ["domains_value_1", "domains_value_2"],
"status": "status_value",
},
"name": "name_value",
"private_key": "private_key_value",
"region": "region_value",
"self_link": "self_link_value",
"self_managed": {
"certificate": "certificate_value",
"private_key": "private_key_value",
},
"subject_alternative_names": [
"subject_alternative_names_value_1",
"subject_alternative_names_value_2",
],
"type_": "type__value",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_insert_unary_rest_required_fields(
request_type=compute.InsertSslCertificateRequest,
):
transport_class = transports.SslCertificatesRestTransport
request_init = {}
request_init["project"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).insert._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).insert._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixed in.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_insert_unary_rest_unset_required_fields():
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.insert._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",)) & set(("project", "sslCertificateResource",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_insert_unary_rest_interceptors(null_interceptor):
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.SslCertificatesRestInterceptor(),
)
client = SslCertificatesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.SslCertificatesRestInterceptor, "post_insert"
) as post, mock.patch.object(
transports.SslCertificatesRestInterceptor, "pre_insert"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.InsertSslCertificateRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Operation
client.insert_unary(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_insert_unary_rest_bad_request(
transport: str = "rest", request_type=compute.InsertSslCertificateRequest
):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request_init["ssl_certificate_resource"] = {
"certificate": "certificate_value",
"creation_timestamp": "creation_timestamp_value",
"description": "description_value",
"expire_time": "expire_time_value",
"id": 205,
"kind": "kind_value",
"managed": {
"domain_status": {},
"domains": ["domains_value_1", "domains_value_2"],
"status": "status_value",
},
"name": "name_value",
"private_key": "private_key_value",
"region": "region_value",
"self_link": "self_link_value",
"self_managed": {
"certificate": "certificate_value",
"private_key": "private_key_value",
},
"subject_alternative_names": [
"subject_alternative_names_value_1",
"subject_alternative_names_value_2",
],
"type_": "type__value",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.insert_unary(request)
def test_insert_unary_rest_flattened():
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
ssl_certificate_resource=compute.SslCertificate(
certificate="certificate_value"
),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.insert_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/global/sslCertificates"
% client.transport._host,
args[1],
)
def test_insert_unary_rest_flattened_error(transport: str = "rest"):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.insert_unary(
compute.InsertSslCertificateRequest(),
project="project_value",
ssl_certificate_resource=compute.SslCertificate(
certificate="certificate_value"
),
)
def test_insert_unary_rest_error():
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.ListSslCertificatesRequest, dict,])
def test_list_rest(request_type):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.SslCertificateList(
id="id_value",
kind="kind_value",
next_page_token="next_page_token_value",
self_link="self_link_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.SslCertificateList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPager)
assert response.id == "id_value"
assert response.kind == "kind_value"
assert response.next_page_token == "next_page_token_value"
assert response.self_link == "self_link_value"
def test_list_rest_required_fields(request_type=compute.ListSslCertificatesRequest):
transport_class = transports.SslCertificatesRestTransport
request_init = {}
request_init["project"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).list._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).list._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixed in.
assert not set(unset_fields) - set(
("filter", "max_results", "order_by", "page_token", "return_partial_success",)
)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.SslCertificateList()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.SslCertificateList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_list_rest_unset_required_fields():
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.list._get_unset_required_fields({})
assert set(unset_fields) == (
set(("filter", "maxResults", "orderBy", "pageToken", "returnPartialSuccess",))
& set(("project",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_list_rest_interceptors(null_interceptor):
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.SslCertificatesRestInterceptor(),
)
client = SslCertificatesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.SslCertificatesRestInterceptor, "post_list"
) as post, mock.patch.object(
transports.SslCertificatesRestInterceptor, "pre_list"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.SslCertificateList.to_json(
compute.SslCertificateList()
)
request = compute.ListSslCertificatesRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.SslCertificateList
client.list(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
pre.assert_called_once()
post.assert_called_once()
def test_list_rest_bad_request(
transport: str = "rest", request_type=compute.ListSslCertificatesRequest
):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.list(request)
def test_list_rest_flattened():
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.SslCertificateList()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1"}
# get truthy value for each flattened field
mock_args = dict(project="project_value",)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.SslCertificateList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.list(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/global/sslCertificates"
% client.transport._host,
args[1],
)
def test_list_rest_flattened_error(transport: str = "rest"):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list(
compute.ListSslCertificatesRequest(), project="project_value",
)
def test_list_rest_pager(transport: str = "rest"):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# TODO(kbandes): remove this mock unless there's a good reason for it.
# with mock.patch.object(path_template, 'transcode') as transcode:
# Set the response as a series of pages
response = (
compute.SslCertificateList(
items=[
compute.SslCertificate(),
compute.SslCertificate(),
compute.SslCertificate(),
],
next_page_token="abc",
),
compute.SslCertificateList(items=[], next_page_token="def",),
compute.SslCertificateList(
items=[compute.SslCertificate(),], next_page_token="ghi",
),
compute.SslCertificateList(
items=[compute.SslCertificate(), compute.SslCertificate(),],
),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(compute.SslCertificateList.to_json(x) for x in response)
return_values = tuple(Response() for i in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
sample_request = {"project": "sample1"}
pager = client.list(request=sample_request)
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, compute.SslCertificate) for i in results)
pages = list(client.list(request=sample_request).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
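# --- Illustrative sketch (stdlib only, hypothetical helper names) -----------------------
# The list pager test above feeds four fake pages chained by next_page_token ("abc",
# "def", "ghi", "") and expects all items back in order. This is the generic token loop
# such pagers implement under the hood: keep requesting until the token comes back empty.
def _iterate_pages(fetch_page, first_token=""):
    token = first_token
    while True:
        page = fetch_page(token)
        yield page
        token = page["next_page_token"]
        if not token:
            break

_fake_pages = {
    "": {"items": [1, 2, 3], "next_page_token": "abc"},
    "abc": {"items": [], "next_page_token": "def"},
    "def": {"items": [4], "next_page_token": "ghi"},
    "ghi": {"items": [5, 6], "next_page_token": ""},
}
_items = [i for page in _iterate_pages(_fake_pages.__getitem__) for i in page["items"]]
assert _items == [1, 2, 3, 4, 5, 6]
# ----------------------------------------------------------------------------------------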
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SslCertificatesClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = SslCertificatesClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = SslCertificatesClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SslCertificatesClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = SslCertificatesClient(transport=transport)
assert client.transport is transport
@pytest.mark.parametrize("transport_class", [transports.SslCertificatesRestTransport,])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_ssl_certificates_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.SslCertificatesTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_ssl_certificates_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.compute_v1.services.ssl_certificates.transports.SslCertificatesTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.SslCertificatesTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"aggregated_list",
"delete",
"get",
"insert",
"list",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_ssl_certificates_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.ssl_certificates.transports.SslCertificatesTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SslCertificatesTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
def test_ssl_certificates_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.compute_v1.services.ssl_certificates.transports.SslCertificatesTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SslCertificatesTransport()
adc.assert_called_once()
def test_ssl_certificates_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
SslCertificatesClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
def test_ssl_certificates_http_transport_client_cert_source_for_mtls():
cred = ga_credentials.AnonymousCredentials()
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
) as mock_configure_mtls_channel:
transports.SslCertificatesRestTransport(
credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
)
mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
@pytest.mark.parametrize("transport_name", ["rest",])
def test_ssl_certificates_host_no_port(transport_name):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com"
),
transport=transport_name,
)
assert client.transport._host == (
"compute.googleapis.com:443"
if transport_name in ["grpc", "grpc_asyncio"]
else "https://compute.googleapis.com"
)
@pytest.mark.parametrize("transport_name", ["rest",])
def test_ssl_certificates_host_with_port(transport_name):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com:8000"
),
transport=transport_name,
)
assert client.transport._host == (
"compute.googleapis.com:8000"
if transport_name in ["grpc", "grpc_asyncio"]
else "https://compute.googleapis.com:8000"
)
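# --- Illustrative sketch (an assumption about intent, not the client's actual helper) ---
# The two host tests above distinguish transports: gRPC hosts are "host:port" strings
# (defaulting to port 443), while the REST transport keeps a fully-qualified https URL
# and preserves an explicit port when one is given.
def _format_host(api_endpoint: str, transport_name: str) -> str:
    if transport_name in ("grpc", "grpc_asyncio"):
        return api_endpoint if ":" in api_endpoint else api_endpoint + ":443"
    return "https://" + api_endpoint

assert _format_host("compute.googleapis.com", "grpc") == "compute.googleapis.com:443"
assert _format_host("compute.googleapis.com", "rest") == "https://compute.googleapis.com"
assert _format_host("compute.googleapis.com:8000", "rest") == "https://compute.googleapis.com:8000"
# ----------------------------------------------------------------------------------------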
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = SslCertificatesClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = SslCertificatesClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = SslCertificatesClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = SslCertificatesClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = SslCertificatesClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = SslCertificatesClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = SslCertificatesClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = SslCertificatesClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = SslCertificatesClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = SslCertificatesClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = SslCertificatesClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = SslCertificatesClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = SslCertificatesClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = SslCertificatesClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = SslCertificatesClient.parse_common_location_path(path)
assert expected == actual
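# --- Illustrative sketch (stdlib regex, not the generated path helpers) -----------------
# The *_path / parse_*_path tests above check that resource-path construction is
# reversible. A generic version of that round trip:
import re as _re2

def _build_path(template: str, **kwargs) -> str:
    return template.format(**kwargs)

def _parse_path(template: str, path: str) -> dict:
    pattern = _re2.sub(r"\{(\w+)\}", r"(?P<\1>[^/]+)", template)
    match = _re2.fullmatch(pattern, path)
    return match.groupdict() if match else {}

_tmpl = "projects/{project}/locations/{location}"
assert _parse_path(_tmpl, _build_path(_tmpl, project="winkle", location="nautilus")) == {
    "project": "winkle",
    "location": "nautilus",
}
# ----------------------------------------------------------------------------------------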
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.SslCertificatesTransport, "_prep_wrapped_messages"
) as prep:
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.SslCertificatesTransport, "_prep_wrapped_messages"
) as prep:
transport_class = SslCertificatesClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
def test_transport_close():
transports = {
"rest": "_session",
}
for transport, close_name in transports.items():
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"rest",
]
for transport in transports:
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[(SslCertificatesClient, transports.SslCertificatesRestTransport),],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
| googleapis/python-compute | tests/unit/gapic/compute_v1/test_ssl_certificates.py | Python | apache-2.0 | 91,822 | ["Octopus"] | 6b21b4df3393dec2e1280702beb257b1a47ec62874a3b4f4a66bdc4ddc88df1d |
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import chigger
reader = chigger.exodus.ExodusReader('../input/mug_blocks_out.e')
mug = chigger.exodus.ExodusResult(reader, variable='diffused', cmap='viridis', cmap_num_colors=2)
cbar = chigger.exodus.ExodusColorBar(mug)
window = chigger.RenderWindow(mug, cbar, size=[300,300], test=True)
window.write('colormap_number.png')
window.start()
| nuclear-wizard/moose | python/chigger/tests/colormap/colormap_number.py | Python | lgpl-2.1 | 707 | ["MOOSE"] | d6bf1106b5464c0e9176af3348e0ae8085d555c38ace626c0b43f2e48062b410 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides objects for extracting timing data from the ABINIT output files.
It also provides tools to analyze and to visualize the parallel efficiency.
"""
from __future__ import unicode_literals, division
import sys
import collections
import numpy as np
from six.moves import zip
from monty.string import is_string, list_strings
from pymatgen.util.num_utils import minloc
from pymatgen.util.plotting_utils import add_fig_kwargs, get_ax_fig_plt
import logging
logger = logging.getLogger(__name__)
def alternate(*iterables):
"""
[a[0], b[0], ... , a[1], b[1], ..., a[n], b[n] ...]
>>> alternate([1,4], [2,5], [3,6])
[1, 2, 3, 4, 5, 6]
"""
items = []
for tup in zip(*iterables):
items.extend([item for item in tup])
return items
class AbinitTimerParserError(Exception):
"""Errors raised by AbinitTimerParser"""
class AbinitTimerParser(collections.Iterable):
"""
Responsible for parsing a list of output files, and managing the parsed database.
"""
# The markers enclosing the data.
BEGIN_TAG = "-<BEGIN_TIMER"
END_TAG = "-<END_TIMER>"
Error = AbinitTimerParserError
#DEFAULT_MPI_RANK = "0"
def __init__(self):
# List of files that have been parsed.
self._filenames = []
# timers[filename][mpi_rank]
# contains the timer extracted from the file filename associated to the MPI rank mpi_rank.
self._timers = collections.OrderedDict()
def __iter__(self):
return self._timers.__iter__()
def __len__(self):
return len(self._timers)
def parse(self, filenames):
"""
Read and parse a filename or a list of filenames.
Files that cannot be opened are ignored. A single filename may also be given.
Return list of successfully read files.
"""
filenames = list_strings(filenames)
read_ok = []
for fname in filenames:
try:
fh = open(fname)
except IOError:
logger.warning("Cannot open file %s" % fname)
continue
try:
self._read(fh, fname)
read_ok.append(fname)
except self.Error as e:
logger.warning("exception while parsing file %s:\n%s" % (fname, str(e)))
continue
finally:
fh.close()
# Add read_ok to the list of files that have been parsed.
self._filenames.extend(read_ok)
return read_ok
def _read(self, fh, fname):
"""Parse the TIMER section"""
if fname in self._timers:
raise self.Error("Cannot overwrite timer associated to: %s " % fname)
data = {}
def parse_line(line):
name, vals = line[:25], line[25:].split()
ctime, cfract, wtime, wfract, ncalls, gflops = vals
return AbinitTimerSection(name, ctime, cfract, wtime, wfract, ncalls, gflops)
inside, has_timer = 0, False
for line in fh:
#print(line.strip())
if line.startswith(self.BEGIN_TAG):
has_timer = True
sections = []
info = {}
inside = 1
line = line[len(self.BEGIN_TAG):].strip()[:-1]
info["fname"] = fname
for tok in line.split(","):
(key, val) = [s.strip() for s in tok.split("=")]
info[key] = val
elif line.startswith(self.END_TAG):
inside = 0
timer = AbinitTimer(sections, info, cpu_time, wall_time)
mpi_rank = info["mpi_rank"]
data[mpi_rank] = timer
elif inside:
inside += 1
line = line[1:].strip()
if inside == 2:
d = dict()
for tok in line.split(","):
(key, val) = [s.strip() for s in tok.split("=")]
d[key] = float(val)
cpu_time, wall_time = d["cpu_time"], d["wall_time"]
elif inside > 5:
sections.append(parse_line(line))
else:
# Lines 3-5 of a timer block are expected to be header/empty lines;
# if one of them parses as a data line, the layout is not what we expect.
parser_failed = False
try:
parse_line(line)
except Exception:
parser_failed = True
if not parser_failed:
raise self.Error("line should be empty: " + str(inside) + line)
if not has_timer:
raise self.Error("%s: No timer section found" % fname)
# Add it to the dict
self._timers[fname] = data
#def set_default_mpi_rank(mpi_rank): self._default_mpi_rank = mpi_rank
#def get_default_mpi_rank(mpi_rank): return self._default_mpi_rank
def timers(self, filename=None, mpi_rank="0"):
"""Return the list of timers associated to the given filename and MPI rank mpi_rank."""
if filename is not None:
timers = [self._timers[filename][mpi_rank]]
else:
timers = [self._timers[filename][mpi_rank] for filename in self._filenames]
return timers
def section_names(self, ordkey="wall_time"):
"""Return the names of sections ordered by ordkey."""
section_names = [] # Avoid UnboundLocalError
# FIXME this is not trivial
for idx, timer in enumerate(self.timers()):
if idx == 0:
section_names = [s.name for s in timer.order_sections(ordkey)]
#check = section_names
#else:
# new_set = set( [s.name for s in timer.order_sections(ordkey)])
# section_names.intersection_update(new_set)
# check = check.union(new_set)
#if check != section_names:
# print("sections", section_names)
# print("check",check)
return section_names
def get_sections(self, section_name):
"""
Return the list of sections stored in self.timers() whose name is section_name.
A fake section is returned if the timer does not have section_name.
"""
sections = []
for timer in self.timers():
for sect in timer.sections:
if sect.name == section_name:
sections.append(sect)
break
else:
sections.append(AbinitTimerSection.fake())
return sections
def pefficiency(self):
"""
Analyze the parallel efficiency.
"""
timers = self.timers()
# Number of CPUs employed in each calculation.
ncpus = [timer.ncpus for timer in timers]
# Find the minimum number of cpus used and its index in timers.
min_idx = minloc(ncpus)
min_ncpus = ncpus[min_idx]
# Reference timer
ref_t = timers[min_idx]
# Compute the parallel efficiency (total efficiency and the efficiency of each section)
peff = {}
ctime_peff = [(min_ncpus * ref_t.cpu_time) / (t.cpu_time * ncp) for (t, ncp) in zip(timers, ncpus)]
wtime_peff = [(min_ncpus * ref_t.wall_time) / (t.wall_time * ncp) for (t, ncp) in zip(timers, ncpus)]
n = len(timers)
peff["total"] = {}
peff["total"]["cpu_time"] = ctime_peff
peff["total"]["wall_time"] = wtime_peff
peff["total"]["cpu_fract"] = n * [100]
peff["total"]["wall_fract"] = n * [100]
for sect_name in self.section_names():
#print(sect_name)
ref_sect = ref_t.get_section(sect_name)
sects = [t.get_section(sect_name) for t in timers]
try:
ctime_peff = [(min_ncpus * ref_sect.cpu_time) / (s.cpu_time * ncp) for (s, ncp) in zip(sects, ncpus)]
wtime_peff = [(min_ncpus * ref_sect.wall_time) / (s.wall_time * ncp) for (s, ncp) in zip(sects, ncpus)]
except ZeroDivisionError:
ctime_peff = n * [-1]
wtime_peff = n * [-1]
assert sect_name not in peff
peff[sect_name] = {}
peff[sect_name]["cpu_time"] = ctime_peff
peff[sect_name]["wall_time"] = wtime_peff
peff[sect_name]["cpu_fract"] = [s.cpu_fract for s in sects]
peff[sect_name]["wall_fract"] = [s.wall_fract for s in sects]
return ParallelEfficiency(self._filenames, min_idx, peff)
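# A worked sketch of the efficiency formula used above (all numbers invented):
# if the reference run uses 2 CPUs and takes 100 s of wall time, and a second
# run uses 8 CPUs and takes 30 s, its total wall-time efficiency is
# (2 * 100) / (30 * 8) = 0.83..., i.e. roughly 83% of ideal speedup.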
def summarize(self, **kwargs):
"""
Return pandas DataFrame
"""
import pandas as pd
colnames = ["fname", "wall_time", "cpu_time", "mpi_nprocs", "omp_nthreads", "mpi_rank"]
frame = pd.DataFrame(columns=colnames)
for i, timer in enumerate(self.timers()):
frame = frame.append({k: getattr(timer, k) for k in colnames}, ignore_index=True)
frame["tot_ncpus"] = frame["mpi_nprocs"] * frame["omp_nthreads"]
# Compute parallel efficiency (use the run with min number of cpus to normalize).
i = frame["tot_ncpus"].idxmin()
ref_wtime = frame.ix[i]["wall_time"]
ref_ncpus = frame.ix[i]["tot_ncpus"]
frame["peff"] = (ref_ncpus * ref_wtime) / (frame["wall_time"] * frame["tot_ncpus"])
return frame
@add_fig_kwargs
def plot_efficiency(self, key="wall_time", what="gb", nmax=5, ax=None, **kwargs):
ax, fig, plt = get_ax_fig_plt(ax=ax)
timers = self.timers()
peff = self.pefficiency()
# Table with the parallel efficiency for all the sections.
#pprint_table(peff.totable())
n = len(timers)
xx = np.arange(n)
ax.set_color_cycle(['g', 'b', 'c', 'm', 'y', 'k'])
legend_entries = []
# Plot sections with good efficiency.
lines = []
if "g" in what:
good = peff.good_sections(key=key, nmax=nmax)
for g in good:
#print(g, peff[g])
yy = peff[g][key]
line, = ax.plot(xx, yy, "-->", linewidth=3.0, markersize=10)
lines.append(line)
legend_entries.append(g)
# Plot sections with bad efficiency.
if "b" in what:
bad = peff.bad_sections(key=key, nmax=nmax)
for b in bad:
#print(b, peff[b])
yy = peff[b][key]
line, = ax.plot(xx, yy, "-.<", linewidth=3.0, markersize=10)
lines.append(line)
legend_entries.append(b)
if "total" not in legend_entries:
yy = peff["total"][key]
total_line, = ax.plot(xx, yy, "r", linewidth=3.0, markersize=10)
lines.append(total_line)
legend_entries.append("total")
ax.legend(lines, legend_entries, loc="best", shadow=True)
#ax.set_title(title)
ax.set_xlabel('Total_NCPUs')
ax.set_ylabel('Efficiency')
ax.grid(True)
# Set xticks and labels.
labels = ["MPI = %d, OMP = %d" % (t.mpi_nprocs, t.omp_nthreads) for t in timers]
ax.set_xticks(xx)
ax.set_xticklabels(labels, fontdict=None, minor=False, rotation=15)
return fig
@add_fig_kwargs
def plot_pie(self, key="wall_time", minfract=0.05, ax=None, **kwargs):
"""Pie charts of the different timers."""
ax, fig, plt = get_ax_fig_plt(ax=ax)
timers = self.timers()
n = len(timers)
# Make square figures and axes
the_grid = plt.GridSpec(n, 1)
fig = plt.figure(1, figsize=(6, 6))
for idx, timer in enumerate(timers):
plt.subplot(the_grid[idx, 0])
plt.title(str(timer))
timer.pie(key=key, minfract=minfract)
return fig
@add_fig_kwargs
def plot_stacked_hist(self, key="wall_time", nmax=5, ax=None, **kwargs):
"""Stacked histogram of the different timers."""
ax, fig, plt = get_ax_fig_plt(ax=ax)
mpi_rank = "0"
timers = self.timers(mpi_rank=mpi_rank)
n = len(timers)
names, values = [], []
rest = np.zeros(n)
for idx, sname in enumerate(self.section_names(ordkey=key)):
sections = self.get_sections(sname)
svals = np.asarray([s.__dict__[key] for s in sections])
if idx < nmax:
names.append(sname)
values.append(svals)
else:
rest += svals
names.append("others (nmax = %d)" % nmax)
values.append(rest)
#for (n, vals) in zip(names, values): print(n, vals)
# The dataset is stored in values.
# Now create the stacked histogram.
ind = np.arange(n) # the locations for the groups
width = 0.35 # the width of the bars
# this does not work with matplotlib < 1.0
#plt.rcParams['axes.color_cycle'] = ['r', 'g', 'b', 'c']
colors = nmax * ['r', 'g', 'b', 'c', 'k', 'y', 'm']
bars = []
bottom = np.zeros(n)
for idx, vals in enumerate(values):
color = colors[idx]
bar = plt.bar(ind, vals, width, color=color, bottom=bottom)
bars.append(bar)
bottom += vals
ax.set_ylabel(key)
#ax.title("Stacked histogram for the %d most important sections" % nmax)
labels = ["MPI = %d, OMP = %d" % (t.mpi_nprocs, t.omp_nthreads) for t in timers]
plt.xticks(ind + width / 2.0, labels, rotation=15)
#plt.yticks(np.arange(0,81,10))
ax.legend([bar[0] for bar in bars], names, loc="best")
return fig
def plot_all(self, **kwargs):
figs = []; app = figs.append
app(self.plot_efficiency())
app(self.plot_pie())
app(self.plot_stacked_hist())
return figs
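# A minimal usage sketch for AbinitTimerParser (the file names below are
# hypothetical; any ABINIT output files containing TIMER sections would do):
#
#     parser = AbinitTimerParser()
#     parser.parse(["run_np2.abo", "run_np8.abo"])
#     peff = parser.pefficiency()          # ParallelEfficiency mapping
#     frame = parser.summarize()           # pandas DataFrame, one row per timer
#     fig = parser.plot_efficiency()       # efficiency plot (needs matplotlib)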
class ParallelEfficiency(dict):
def __init__(self, filenames, ref_idx, *args, **kwargs):
self.update(*args, **kwargs)
self.filenames = filenames
self._ref_idx = ref_idx
def _order_by_peff(self, key, criterion, reverse=True):
estimators = {
"min": min,
"max": max,
"mean": lambda items: sum(items) / len(items)
}
self.estimator = estimators[criterion]
data = []
for (sect_name, peff) in self.items():
# Ignore values where we had a division by zero.
if all([v != -1 for v in peff[key]]):
values = peff[key][:]
#print(sect_name, values)
if len(values) > 1:
ref_value = values.pop(self._ref_idx)
assert ref_value == 1.0
data.append((sect_name, self.estimator(values)))
fsort = lambda t: t[1]
data.sort(key=fsort, reverse=reverse)
return tuple([sect_name for (sect_name, e) in data])
def totable(self, stop=None, reverse=True):
osects = self._order_by_peff("wall_time", criterion="mean", reverse=reverse)
n = len(self.filenames)
table = [["AbinitTimerSection"] + alternate(self.filenames, n * ["%"])]
for sect_name in osects:
peff = self[sect_name]["wall_time"]
fract = self[sect_name]["wall_fract"]
vals = alternate(peff, fract)
table.append([sect_name] + ["%.2f" % val for val in vals])
return table
def good_sections(self, key="wall_time", criterion="mean", nmax=5):
good_sections = self._order_by_peff(key, criterion=criterion)
return good_sections[:nmax]
def bad_sections(self, key="wall_time", criterion="mean", nmax=5):
bad_sections = self._order_by_peff(key, criterion=criterion, reverse=False)
return bad_sections[:nmax]
class AbinitTimerSection(object):
"""Record with the timing results associated to a section of code."""
STR_FIELDS = [
"name"
]
NUMERIC_FIELDS = [
"wall_time",
"wall_fract",
"cpu_time",
"cpu_fract",
"ncalls",
"gflops",
]
FIELDS = tuple(STR_FIELDS + NUMERIC_FIELDS)
@classmethod
def fake(cls):
return AbinitTimerSection("fake", 0.0, 0.0, 0.0, 0.0, -1, 0.0)
def __init__(self, name, cpu_time, cpu_fract, wall_time, wall_fract, ncalls, gflops):
self.name = name.strip()
self.cpu_time = float(cpu_time)
self.cpu_fract = float(cpu_fract)
self.wall_time = float(wall_time)
self.wall_fract = float(wall_fract)
self.ncalls = int(ncalls)
self.gflops = float(gflops)
def to_tuple(self):
return tuple([self.__dict__[at] for at in AbinitTimerSection.FIELDS])
def to_dict(self):
return {at: self.__dict__[at] for at in AbinitTimerSection.FIELDS}
def to_csvline(self, with_header=False):
"""Return a string with data in CSV format"""
string = ""
if with_header:
string += "# " + " ".join(at for at in AbinitTimerSection.FIELDS) + "\n"
string += ", ".join(str(v) for v in self.to_tuple()) + "\n"
return string
def __str__(self):
string = ""
for a in AbinitTimerSection.FIELDS: string += a + " = " + str(self.__dict__[a]) + ","
return string[:-1]
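# Illustrative example (values invented): constructing a section record and
# dumping it as CSV. Note that the CSV column order follows FIELDS, i.e.
# name, wall_time, wall_fract, cpu_time, cpu_fract, ncalls, gflops.
#
#     sect = AbinitTimerSection("fourwf", 12.3, 40.0, 13.1, 42.0, 128, 1.5)
#     sect.to_csvline()   # -> "fourwf, 13.1, 42.0, 12.3, 40.0, 128, 1.5\n"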
class AbinitTimer(object):
"""Container class storing the timing results."""
def __init__(self, sections, info, cpu_time, wall_time):
# Store sections and names
self.sections = tuple(sections)
self.section_names = tuple([s.name for s in self.sections])
self.info = info
self.cpu_time = float(cpu_time)
self.wall_time = float(wall_time)
self.mpi_nprocs = int(info["mpi_nprocs"])
self.omp_nthreads = int(info["omp_nthreads"])
self.mpi_rank = info["mpi_rank"].strip()
self.fname = info["fname"].strip()
def __str__(self):
string = "file = %s, wall_time = %.1f, mpi_nprocs = %d, omp_nthreads = %d" % (
self.fname, self.wall_time, self.mpi_nprocs, self.omp_nthreads )
#string += ", rank = " + self.mpi_rank
return string
def __cmp__(self, other):
return cmp(self.wall_time, other.wall_time)
@property
def ncpus(self):
"""Total number of CPUs employed."""
return self.mpi_nprocs * self.omp_nthreads
def get_section(self, section_name):
idx = self.section_names.index(section_name)
sect = self.sections[idx]
assert sect.name == section_name
return sect
def to_csv(self, fileobj=sys.stdout):
"""Write data on file fileobj using CSV format."""
openclose = is_string(fileobj)
if openclose:
fileobj = open(fileobj, "w")
for (idx, section) in enumerate(self.sections):
fileobj.write(section.to_csvline(with_header=(idx == 0)))
fileobj.flush()
if openclose:
fileobj.close()
def to_table(self, sort_key="wall_time", stop=None):
"""Return a table (list of lists) with timer data"""
table = [list(AbinitTimerSection.FIELDS), ]
ord_sections = self.order_sections(sort_key)
if stop is not None:
ord_sections = ord_sections[:stop]
for osect in ord_sections:
row = [str(item) for item in osect.to_tuple()]
table.append(row)
return table
# Maintain old API
totable = to_table
def get_dataframe(self, sort_key="wall_time", **kwargs):
"""
Return pandas DataFrame
"""
import pandas as pd
frame = pd.DataFrame(columns=AbinitTimerSection.FIELDS)
for osect in self.order_sections(sort_key):
frame = frame.append(osect.to_dict(), ignore_index=True)
# Monkey patch
frame.info = self.info
frame.cpu_time = self.cpu_time
frame.wall_time = self.wall_time
frame.mpi_nprocs = self.mpi_nprocs
frame.omp_nthreads = self.omp_nthreads
frame.mpi_rank = self.mpi_rank
frame.fname = self.fname
return frame
def get_values(self, keys):
"""Return a list of values associated to a particular list of keys"""
if is_string(keys):
return [s.__dict__[keys] for s in self.sections]
else:
values = []
for k in keys:
values.append([s.__dict__[k] for s in self.sections])
return values
def names_and_values(self, key, minval=None, minfract=None, sorted=True):
"""
Select the entries whose value[key] is >= minval or whose fraction[key] is >= minfract.
Return the names of the sections and the corresponding values.
"""
values = self.get_values(key)
names = self.get_values("name")
new_names, new_values = [], []
other_val = 0.0
if minval is not None:
assert minfract is None
for n, v in zip(names, values):
if v >= minval:
new_names.append(n)
new_values.append(v)
else:
other_val += v
new_names.append("below minval " + str(minval))
new_values.append(other_val)
elif minfract is not None:
assert minval is None
total = self.sum_sections(key)
for n, v in zip(names, values):
if v / total >= minfract:
new_names.append(n)
new_values.append(v)
else:
other_val += v
new_names.append("below minfract " + str(minfract))
new_values.append(other_val)
else:
# all values
new_names, new_values = names, values
if sorted:
# Sort new_values and rearrange new_names.
fsort = lambda t: t[1]
nandv = [nv for nv in zip(new_names, new_values)]
nandv.sort(key=fsort)
new_names, new_values = [n[0] for n in nandv], [n[1] for n in nandv]
return new_names, new_values
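# Illustrative behaviour of names_and_values (numbers invented): with three
# sections whose wall_time values are A=6.0, B=3.0 and C=1.0 s and
# minfract=0.25, the total is 10.0 s, so A (0.6) and B (0.3) are kept while C
# is folded into a "below minfract 0.25" entry with value 1.0; the pairs are
# then returned sorted by value.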
def _reduce_sections(self, keys, operator):
return operator(self.get_values(keys))
def sum_sections(self, keys):
return self._reduce_sections(keys, sum)
def order_sections(self, key, reverse=True):
"""Sort sections according to the value of key."""
fsort = lambda s: s.__dict__[key]
return sorted(self.sections, key=fsort, reverse=reverse)
@add_fig_kwargs
def cpuwall_histogram(self, ax=None, **kwargs):
ax, fig, plt = get_ax_fig_plt(ax=ax)
nk = len(self.sections)
ind = np.arange(nk) # the x locations for the groups
width = 0.35 # the width of the bars
cpu_times = self.get_values("cpu_time")
rects1 = plt.bar(ind, cpu_times, width, color='r')
wall_times = self.get_values("wall_time")
rects2 = plt.bar(ind + width, wall_times, width, color='y')
# Add ylabel and title
ax.set_ylabel('Time (s)')
#if title:
# plt.title(title)
#else:
# plt.title('CPU-time and Wall-time for the different sections of the code')
ticks = self.get_values("name")
ax.set_xticks(ind + width, ticks)
ax.legend((rects1[0], rects2[0]), ('CPU', 'Wall'), loc="best")
return fig
#def hist2(self, key1="wall_time", key2="cpu_time"):
# labels = self.get_values("name")
# vals1, vals2 = self.get_values([key1, key2])
# N = len(vals1)
# assert N == len(vals2)
# plt.figure(1)
# plt.subplot(2, 1, 1) # 2 rows, 1 column, figure 1
# n1, bins1, patches1 = plt.hist(vals1, N, facecolor="m")
# plt.xlabel(labels)
# plt.ylabel(key1)
# plt.subplot(2, 1, 2)
# n2, bins2, patches2 = plt.hist(vals2, N, facecolor="y")
# plt.xlabel(labels)
# plt.ylabel(key2)
# plt.show()
def pie(self, key="wall_time", minfract=0.05, title=None):
import matplotlib.pyplot as plt
# Don't show sections whose value is less than minfract
labels, vals = self.names_and_values(key, minfract=minfract)
return plt.pie(vals, explode=None, labels=labels, autopct='%1.1f%%', shadow=True)
def scatter_hist(self, ax=None, **kwargs):
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
ax, fig, plt = get_ax_fig_plt(ax=ax)
#title = kwargs.pop("title", None)
#show = kwargs.pop("show", True)
#savefig = kwargs.pop("savefig", None)
#fig = plt.figure(1, figsize=(5.5, 5.5))
x = np.asarray(self.get_values("cpu_time"))
y = np.asarray(self.get_values("wall_time"))
# the scatter plot:
axScatter = plt.subplot(1, 1, 1)
axScatter.scatter(x, y)
axScatter.set_aspect("auto")
# create new axes on the right and on the top of the current axes
# The first argument of the new_vertical(new_horizontal) method is
# the height (width) of the axes to be created in inches.
divider = make_axes_locatable(axScatter)
axHistx = divider.append_axes("top", 1.2, pad=0.1, sharex=axScatter)
axHisty = divider.append_axes("right", 1.2, pad=0.1, sharey=axScatter)
# make some labels invisible
plt.setp(axHistx.get_xticklabels() + axHisty.get_yticklabels(), visible=False)
# now determine nice limits by hand:
binwidth = 0.25
xymax = np.max([np.max(np.fabs(x)), np.max(np.fabs(y))])
lim = (int(xymax / binwidth) + 1) * binwidth
bins = np.arange(-lim, lim + binwidth, binwidth)
axHistx.hist(x, bins=bins)
axHisty.hist(y, bins=bins, orientation='horizontal')
# the xaxis of axHistx and yaxis of axHisty are shared with axScatter,
# thus there is no need to manually adjust the xlim and ylim of these axis.
#axHistx.axis["bottom"].major_ticklabels.set_visible(False)
for tl in axHistx.get_xticklabels():
tl.set_visible(False)
axHistx.set_yticks([0, 50, 100])
#axHisty.axis["left"].major_ticklabels.set_visible(False)
for tl in axHisty.get_yticklabels():
tl.set_visible(False)
axHisty.set_xticks([0, 50, 100])
plt.draw()
return fig
| aykol/pymatgen | pymatgen/io/abinit/abitimer.py | Python | mit | 26,477 | ["ABINIT", "pymatgen"] | 829735b1ac5882f7525880087f970c0cf779e8714529303bb4be0f9e8a4630e7 |
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity;
# indexing with [:, np.newaxis] would not preserve it.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
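# A minimal usage sketch, not part of the library (the iris dataset ships with
# scikit-learn; exact outputs depend on the fitted tree):
#
#     from sklearn.datasets import load_iris
#     iris = load_iris()
#     clf = DecisionTreeClassifier(max_depth=3, random_state=0)
#     clf.fit(iris.data, iris.target)
#     clf.predict_proba(iris.data[:2])   # per-class probabilities for 2 samples
#     clf.apply(iris.data[:2])           # leaf index reached by each sample
#     clf.feature_importances_           # normalized impurity reductions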
| mhue/scikit-learn | sklearn/tree/tree.py | Python | bsd-3-clause | 34,690 | ["Brian"] | e4891eb1fd6d291023916aae89c62f6709287b5d5dae328fce8ae2642b5b2ff5 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
# Your stuff: custom urls includes go here
url(r'^health-care-plan/', include('powerschool_apps.health_care_plan.urls', namespace='health-care-plan')),
url(r'^vocational-audit/', include('powerschool_apps.vocational_audit.urls', namespace='vocational-audit'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development; just visit
# these URLs in a browser to see how the error pages look.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
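# A hedged usage note (the URL names below are assumptions; they depend on the
# included urlconfs, which are not shown here). With the namespaces registered
# above, URLs can be reversed as, e.g.:
#
#     from django.urls import reverse
#     reverse('health-care-plan:index')      # in Python code
#     {% url 'vocational-audit:index' %}     # in a template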
| IronCountySchoolDistrict/powerschool_apps | config/urls.py | Python | mit | 1,506 | ["VisIt"] | c82ec8e475e84c769ff13373920ce3cde25987beda4286b7727aa2ea8d7dce62 |
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import logging
from functools import partial
import warnings
import numpy as np
import dask.array as da
import dask.delayed as dd
from dask import threaded
from dask.diagnostics import ProgressBar
from itertools import product
from ..signal import BaseSignal
from ..misc.utils import multiply, dummy_context_manager
from ..external.progressbar import progressbar
from ..external.astroML.histtools import dasky_histogram
from hyperspy.misc.array_tools import _requires_linear_rebin
from hyperspy.exceptions import VisibleDeprecationWarning
_logger = logging.getLogger(__name__)
lazyerror = NotImplementedError('This method is not available in lazy signals')
def to_array(thing, chunks=None):
"""Accepts BaseSignal, dask or numpy arrays and always produces either
numpy or dask array.
Parameters
----------
thing : {BaseSignal, dask.array.Array, numpy.ndarray}
the thing to be converted
chunks : {None, tuple of tuples}
If None, the returned value is a numpy array. Otherwise returns dask
array with the chunks as specified.
Returns
-------
res : {numpy.ndarray, dask.array.Array}
"""
if thing is None:
return None
if isinstance(thing, BaseSignal):
thing = thing.data
if chunks is None:
if isinstance(thing, da.Array):
thing = thing.compute()
if isinstance(thing, np.ndarray):
return thing
else:
raise ValueError
else:
if isinstance(thing, np.ndarray):
thing = da.from_array(thing, chunks=chunks)
if isinstance(thing, da.Array):
if thing.chunks != chunks:
thing = thing.rechunk(chunks)
return thing
else:
raise ValueError
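# A small usage sketch for to_array (shapes and chunk sizes are arbitrary):
#
#     x = np.arange(12).reshape(3, 4)
#     to_array(x)                                # numpy in, numpy out
#     to_array(x, chunks=((1, 2), (4,)))         # numpy in, dask array out
#     to_array(da.ones((3, 4), chunks=(3, 4)))   # dask in, computed to numpy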
class LazySignal(BaseSignal):
"""A Lazy Signal instance that delays computation until explicitly saved
(assuming storing the full result of computation in memory is not feasible)
"""
_lazy = True
def compute(self, progressbar=True, close_file=False):
"""Attempt to store the full signal in memory.
close_file: bool
If True, attempt to close the file associated with the dask
array data if any. Note that closing the file will make all other
associated lazy signals inoperative.
"""
if progressbar:
cm = ProgressBar
else:
cm = dummy_context_manager
with cm():
# Avoid shadowing the module-level ``da`` (dask.array) import.
data = self.data.compute()
if close_file:
self.close_file()
self.data = data
self._lazy = False
self._assign_subclass()
def close_file(self):
"""Closes the associated data file if any.
Currently it only supports closing the file associated with a dask
array created from an h5py DataSet (default HyperSpy hdf5 reader).
"""
arrkey = None
for key in self.data.dask.keys():
if "array-original" in key:
arrkey = key
break
if arrkey:
try:
self.data.dask[arrkey].file.close()
except AttributeError as e:
_logger.exception("Failed to close lazy Signal file")
def _get_dask_chunks(self, axis=None, dtype=None):
"""Returns dask chunks
Aims:
- Have at least one signal (or specified axis) in a single chunk,
or as many as fit in memory
Parameters
----------
axis : {int, string, None, axis, tuple}
If axis is None (default), returns chunks for current data shape so
that at least one signal is in the chunk. If an axis is specified,
only that particular axis is guaranteed to be "not sliced".
dtype : {string, np.dtype}
The dtype of target chunks.
Returns
-------
Tuple of tuples, dask chunks
"""
dc = self.data
dcshape = dc.shape
for _axis in self.axes_manager._axes:
if _axis.index_in_array < len(dcshape):
_axis.size = int(dcshape[_axis.index_in_array])
if axis is not None:
need_axes = self.axes_manager[axis]
if not np.iterable(need_axes):
need_axes = [need_axes, ]
else:
need_axes = self.axes_manager.signal_axes
if dtype is None:
dtype = dc.dtype
elif not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
typesize = max(dtype.itemsize, dc.dtype.itemsize)
want_to_keep = multiply([ax.size for ax in need_axes]) * typesize
        # @mrocklin recommends to have around 100MB chunks, so we do that:
num_that_fit = int(100. * 2.**20 / want_to_keep)
# want to have at least one "signal" per chunk
if num_that_fit < 2:
chunks = [tuple(1 for _ in range(i)) for i in dc.shape]
for ax in need_axes:
chunks[ax.index_in_array] = dc.shape[ax.index_in_array],
return tuple(chunks)
sizes = [
ax.size for ax in self.axes_manager._axes if ax not in need_axes
]
indices = [
ax.index_in_array for ax in self.axes_manager._axes
if ax not in need_axes
]
while True:
if multiply(sizes) <= num_that_fit:
break
i = np.argmax(sizes)
sizes[i] = np.floor(sizes[i] / 2)
chunks = []
ndim = len(dc.shape)
for i in range(ndim):
if i in indices:
size = float(dc.shape[i])
split_array = np.array_split(
np.arange(size), np.ceil(size / sizes[indices.index(i)]))
chunks.append(tuple(len(sp) for sp in split_array))
else:
chunks.append((dc.shape[i], ))
return tuple(chunks)
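    # Worked example of the chunking heuristic above (a sketch with made-up
    # numbers): for a float64 signal with signal shape (2048, 2048), one
    # signal costs 2048 * 2048 * 8 bytes = 32 MiB, so ``num_that_fit`` is
    # int(100 MiB / 32 MiB) = 3, and the navigation sizes are halved until at
    # most 3 signals fall in each chunk while the signal axes stay unsplit.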
def _make_lazy(self, axis=None, rechunk=False, dtype=None):
self.data = self._lazy_data(axis=axis, rechunk=rechunk, dtype=dtype)
def change_dtype(self, dtype, rechunk=True):
from hyperspy.misc import rgb_tools
if not isinstance(dtype, np.dtype) and (dtype not in
rgb_tools.rgb_dtypes):
dtype = np.dtype(dtype)
self._make_lazy(rechunk=rechunk, dtype=dtype)
super().change_dtype(dtype)
change_dtype.__doc__ = BaseSignal.change_dtype.__doc__
def _lazy_data(self, axis=None, rechunk=True, dtype=None):
"""Return the data as a dask array, rechunked if necessary.
Parameters
----------
axis: None, DataAxis or tuple of data axes
The data axis that must not be broken into chunks when `rechunk`
is `True`. If None, it defaults to the current signal axes.
rechunk: bool, "dask_auto"
If `True`, it rechunks the data if necessary making sure that the
axes in ``axis`` are not split into chunks. If `False` it does
not rechunk at least the data is not a dask array, in which case
it chunks as if rechunk was `True`. If "dask_auto", rechunk if
necessary using dask's automatic chunk guessing.
"""
if rechunk == "dask_auto":
new_chunks = "auto"
else:
new_chunks = self._get_dask_chunks(axis=axis, dtype=dtype)
if isinstance(self.data, da.Array):
res = self.data
if self.data.chunks != new_chunks and rechunk:
_logger.info(
"Rechunking.\nOriginal chunks: %s" % str(self.data.chunks))
res = self.data.rechunk(new_chunks)
_logger.info(
"Final chunks: %s " % str(res.chunks))
else:
if isinstance(self.data, np.ma.masked_array):
data = np.where(self.data.mask, np.nan, self.data)
else:
data = self.data
res = da.from_array(data, chunks=new_chunks)
assert isinstance(res, da.Array)
return res
def _apply_function_on_data_and_remove_axis(self, function, axes,
out=None, rechunk=True):
def get_dask_function(numpy_name):
# Translate from the default numpy to dask functions
translations = {'amax': 'max', 'amin': 'min'}
if numpy_name in translations:
numpy_name = translations[numpy_name]
return getattr(da, numpy_name)
function = get_dask_function(function.__name__)
axes = self.axes_manager[axes]
if not np.iterable(axes):
axes = (axes, )
ar_axes = tuple(ax.index_in_array for ax in axes)
if len(ar_axes) == 1:
ar_axes = ar_axes[0]
# For reduce operations the actual signal and navigation
# axes configuration does not matter. Hence we leave
# dask guess the chunks
if rechunk is True:
rechunk = "dask_auto"
current_data = self._lazy_data(rechunk=rechunk)
# Apply reducing function
new_data = function(current_data, axis=ar_axes)
if not new_data.ndim:
new_data = new_data.reshape((1, ))
if out:
if out.data.shape == new_data.shape:
out.data = new_data
out.events.data_changed.trigger(obj=out)
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (new_data.shape, out.data.shape))
else:
s = self._deepcopy_with_new_data(new_data)
s._remove_axis([ax.index_in_axes_manager for ax in axes])
return s
def rebin(self, new_shape=None, scale=None,
crop=False, out=None, rechunk=True):
factors = self._validate_rebin_args_and_get_factors(
new_shape=new_shape,
scale=scale)
if _requires_linear_rebin(arr=self.data, scale=factors):
if new_shape:
raise NotImplementedError(
"Lazy rebin requires that the new shape is a divisor "
"of the original signal shape e.g. if original shape "
"(10| 6), new_shape=(5| 3) is valid, (3 | 4) is not.")
else:
raise NotImplementedError(
"Lazy rebin requires scale to be integer and divisor of the "
"original signal shape")
axis = {ax.index_in_array: ax
for ax in self.axes_manager._axes}[factors.argmax()]
self._make_lazy(axis=axis, rechunk=rechunk)
return super().rebin(new_shape=new_shape,
scale=scale, crop=crop, out=out)
rebin.__doc__ = BaseSignal.rebin.__doc__
def __array__(self, dtype=None):
return self.data.__array__(dtype=dtype)
def _make_sure_data_is_contiguous(self):
self._make_lazy(rechunk=True)
def diff(self, axis, order=1, out=None, rechunk=True):
arr_axis = self.axes_manager[axis].index_in_array
def dask_diff(arr, n, axis):
# assume arr is da.Array already
n = int(n)
if n == 0:
return arr
if n < 0:
raise ValueError("order must be positive")
nd = len(arr.shape)
slice1 = [slice(None)] * nd
slice2 = [slice(None)] * nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return dask_diff(arr[slice1] - arr[slice2], n - 1, axis=axis)
else:
return arr[slice1] - arr[slice2]
current_data = self._lazy_data(axis=axis, rechunk=rechunk)
new_data = dask_diff(current_data, order, arr_axis)
if not new_data.ndim:
new_data = new_data.reshape((1, ))
s = out or self._deepcopy_with_new_data(new_data)
if out:
if out.data.shape == new_data.shape:
out.data = new_data
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (new_data.shape, out.data.shape))
axis2 = s.axes_manager[axis]
new_offset = self.axes_manager[axis].offset + (order * axis2.scale / 2)
axis2.offset = new_offset
s.get_dimensions_from_data()
if out is None:
return s
else:
out.events.data_changed.trigger(obj=out)
diff.__doc__ = BaseSignal.diff.__doc__
    def integrate_simpson(self, axis, out=None):
        from scipy import integrate
        axis = self.axes_manager[axis]
data = self._lazy_data(axis=axis, rechunk=True)
new_data = data.map_blocks(
integrate.simps,
x=axis.axis,
axis=axis.index_in_array,
drop_axis=axis.index_in_array,
dtype=data.dtype)
s = out or self._deepcopy_with_new_data(new_data)
if out:
if out.data.shape == new_data.shape:
out.data = new_data
out.events.data_changed.trigger(obj=out)
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (new_data.shape, out.data.shape))
else:
s._remove_axis(axis.index_in_axes_manager)
return s
integrate_simpson.__doc__ = BaseSignal.integrate_simpson.__doc__
def valuemax(self, axis, out=None, rechunk=True):
idx = self.indexmax(axis, rechunk=rechunk)
old_data = idx.data
data = old_data.map_blocks(
lambda x: self.axes_manager[axis].index2value(x))
if out is None:
idx.data = data
return idx
else:
out.data = data
out.events.data_changed.trigger(obj=out)
valuemax.__doc__ = BaseSignal.valuemax.__doc__
def valuemin(self, axis, out=None, rechunk=True):
idx = self.indexmin(axis, rechunk=rechunk)
old_data = idx.data
data = old_data.map_blocks(
lambda x: self.axes_manager[axis].index2value(x))
if out is None:
idx.data = data
return idx
else:
out.data = data
out.events.data_changed.trigger(obj=out)
valuemin.__doc__ = BaseSignal.valuemin.__doc__
def get_histogram(self, bins='freedman', out=None, rechunk=True, **kwargs):
if 'range_bins' in kwargs:
_logger.warning("'range_bins' argument not supported for lazy "
"signals")
del kwargs['range_bins']
from hyperspy.signals import Signal1D
data = self._lazy_data(rechunk=rechunk).flatten()
hist, bin_edges = dasky_histogram(data, bins=bins, **kwargs)
if out is None:
hist_spec = Signal1D(hist)
hist_spec._lazy = True
hist_spec._assign_subclass()
else:
hist_spec = out
# we always overwrite the data because the computation is lazy ->
# the result signal is lazy. Assume that the `out` is already lazy
hist_spec.data = hist
hist_spec.axes_manager[0].scale = bin_edges[1] - bin_edges[0]
hist_spec.axes_manager[0].offset = bin_edges[0]
hist_spec.axes_manager[0].size = hist.shape[-1]
hist_spec.axes_manager[0].name = 'value'
hist_spec.metadata.General.title = (
self.metadata.General.title + " histogram")
hist_spec.metadata.Signal.binned = True
if out is None:
return hist_spec
else:
out.events.data_changed.trigger(obj=out)
get_histogram.__doc__ = BaseSignal.get_histogram.__doc__
@staticmethod
def _estimate_poissonian_noise_variance(dc, gain_factor, gain_offset,
correlation_factor):
variance = (dc * gain_factor + gain_offset) * correlation_factor
        # The lower bound of the variance is the Gaussian noise.
variance = da.clip(variance, gain_offset * correlation_factor, np.inf)
return variance
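    # Worked example for the variance model above (illustrative numbers): with
    # gain_factor=1, gain_offset=4 and correlation_factor=1, a pixel of 100
    # counts gets a variance of (100 * 1 + 4) * 1 = 104, while a pixel of 0
    # counts is clipped up to the Gaussian floor of gain_offset * 1 = 4.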
# def _get_navigation_signal(self, data=None, dtype=None):
# return super()._get_navigation_signal(data=data, dtype=dtype).as_lazy()
# _get_navigation_signal.__doc__ = BaseSignal._get_navigation_signal.__doc__
# def _get_signal_signal(self, data=None, dtype=None):
# return super()._get_signal_signal(data=data, dtype=dtype).as_lazy()
# _get_signal_signal.__doc__ = BaseSignal._get_signal_signal.__doc__
def _calculate_summary_statistics(self, rechunk=True):
if rechunk is True:
# Use dask auto rechunk instead of HyperSpy's one, what should be
# better for these operations
rechunk = "dask_auto"
data = self._lazy_data(rechunk=rechunk)
_raveled = data.ravel()
_mean, _std, _min, _q1, _q2, _q3, _max = da.compute(
da.nanmean(data),
da.nanstd(data),
da.nanmin(data),
da.percentile(_raveled, [25, ]),
da.percentile(_raveled, [50, ]),
da.percentile(_raveled, [75, ]),
da.nanmax(data), )
return _mean, _std, _min, _q1, _q2, _q3, _max
def _map_all(self, function, inplace=True, **kwargs):
calc_result = dd(function)(self.data, **kwargs)
if inplace:
self.data = da.from_delayed(calc_result, shape=self.data.shape,
dtype=self.data.dtype)
return None
return self._deepcopy_with_new_data(calc_result)
def _map_iterate(self,
function,
iterating_kwargs=(),
show_progressbar=None,
parallel=None,
ragged=None,
inplace=True,
**kwargs):
if ragged not in (True, False):
raise ValueError('"ragged" kwarg has to be bool for lazy signals')
_logger.debug("Entering '_map_iterate'")
size = max(1, self.axes_manager.navigation_size)
from hyperspy.misc.utils import (create_map_objects,
map_result_construction)
func, iterators = create_map_objects(function, size, iterating_kwargs,
**kwargs)
iterators = (self._iterate_signal(), ) + iterators
res_shape = self.axes_manager._navigation_shape_in_array
# no navigation
if not len(res_shape) and ragged:
res_shape = (1,)
all_delayed = [dd(func)(data) for data in zip(*iterators)]
if ragged:
sig_shape = ()
sig_dtype = np.dtype('O')
else:
one_compute = all_delayed[0].compute()
sig_shape = one_compute.shape
sig_dtype = one_compute.dtype
pixels = [
da.from_delayed(
res, shape=sig_shape, dtype=sig_dtype) for res in all_delayed
]
for step in reversed(res_shape):
_len = len(pixels)
starts = range(0, _len, step)
ends = range(step, _len + step, step)
pixels = [
da.stack(
pixels[s:e], axis=0) for s, e in zip(starts, ends)
]
result = pixels[0]
res = map_result_construction(
self, inplace, result, ragged, sig_shape, lazy=True)
return res
def _iterate_signal(self):
if self.axes_manager.navigation_size < 2:
yield self()
return
nav_dim = self.axes_manager.navigation_dimension
sig_dim = self.axes_manager.signal_dimension
nav_indices = self.axes_manager.navigation_indices_in_array[::-1]
nav_lengths = np.atleast_1d(
np.array(self.data.shape)[list(nav_indices)])
getitem = [slice(None)] * (nav_dim + sig_dim)
data = self._lazy_data()
for indices in product(*[range(l) for l in nav_lengths]):
for res, ind in zip(indices, nav_indices):
getitem[ind] = res
yield data[tuple(getitem)]
def _block_iterator(self,
flat_signal=True,
get=threaded.get,
navigation_mask=None,
signal_mask=None):
"""A function that allows iterating lazy signal data by blocks,
defining the dask.Array.
Parameters
----------
flat_signal: bool
returns each block flattened, such that the shape (for the
particular block) is (navigation_size, signal_size), with
optionally masked elements missing. If false, returns
the equivalent of s.inav[{blocks}].data, where masked elements are
set to np.nan or 0.
get : dask scheduler
the dask scheduler to use for computations;
default `dask.threaded.get`
navigation_mask : {BaseSignal, numpy array, dask array}
The navigation locations marked as True are not returned (flat) or
set to NaN or 0.
signal_mask : {BaseSignal, numpy array, dask array}
The signal locations marked as True are not returned (flat) or set
to NaN or 0.
"""
self._make_lazy()
data = self._data_aligned_with_axes
nav_chunks = data.chunks[:self.axes_manager.navigation_dimension]
indices = product(*[range(len(c)) for c in nav_chunks])
signalsize = self.axes_manager.signal_size
sig_reshape = (signalsize,) if signalsize else ()
data = data.reshape((self.axes_manager.navigation_shape[::-1] +
sig_reshape))
if signal_mask is None:
signal_mask = slice(None) if flat_signal else \
np.zeros(self.axes_manager.signal_size, dtype='bool')
else:
try:
signal_mask = to_array(signal_mask).ravel()
except ValueError:
# re-raise with a message
raise ValueError("signal_mask has to be a signal, numpy or"
" dask array, but "
"{} was given".format(type(signal_mask)))
if flat_signal:
signal_mask = ~signal_mask
if navigation_mask is None:
nav_mask = da.zeros(
self.axes_manager.navigation_shape[::-1],
chunks=nav_chunks,
dtype='bool')
else:
try:
nav_mask = to_array(navigation_mask, chunks=nav_chunks)
except ValueError:
# re-raise with a message
raise ValueError("navigation_mask has to be a signal, numpy or"
" dask array, but "
"{} was given".format(type(navigation_mask)))
if flat_signal:
nav_mask = ~nav_mask
for ind in indices:
chunk = get(data.dask,
(data.name, ) + ind + (0,) * bool(signalsize))
n_mask = get(nav_mask.dask, (nav_mask.name, ) + ind)
if flat_signal:
yield chunk[n_mask, ...][..., signal_mask]
else:
chunk = chunk.copy()
value = np.nan if np.can_cast('float', chunk.dtype) else 0
chunk[n_mask, ...] = value
chunk[..., signal_mask] = value
yield chunk.reshape(chunk.shape[:-1] +
self.axes_manager.signal_shape[::-1])
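    # Hedged usage sketch for ``_block_iterator`` (``s`` and ``model`` are
    # hypothetical; ``model`` is any estimator exposing ``partial_fit``, e.g.
    # sklearn.decomposition.IncrementalPCA):
    #
    #     for block in s._block_iterator(flat_signal=True):
    #         # each ``block`` is a (navigation_size_of_chunk, signal_size)
    #         # numpy array, ready for incremental learning
    #         model.partial_fit(block)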
def decomposition(self,
normalize_poissonian_noise=False,
algorithm='svd',
output_dimension=None,
signal_mask=None,
navigation_mask=None,
get=threaded.get,
num_chunks=None,
reproject=True,
bounds=False,
**kwargs):
"""Perform Incremental (Batch) decomposition on the data, keeping n
significant components.
Parameters
----------
normalize_poissonian_noise : bool
If True, scale the SI to normalize Poissonian noise
algorithm : str
One of ('svd', 'PCA', 'ORPCA', 'ONMF'). By default 'svd',
lazy SVD decomposition from dask.
output_dimension : int
the number of significant components to keep. If None, keep all
(only valid for SVD)
get : dask scheduler
the dask scheduler to use for computations;
default `dask.threaded.get`
num_chunks : int
the number of dask chunks to pass to the decomposition model.
More chunks require more memory, but should run faster. Will be
            increased to contain at least output_dimension signals.
navigation_mask : {BaseSignal, numpy array, dask array}
The navigation locations marked as True are not used in the
            decomposition.
signal_mask : {BaseSignal, numpy array, dask array}
The signal locations marked as True are not used in the
decomposition.
reproject : bool
Reproject data on the learnt components (factors) after learning.
**kwargs
passed to the partial_fit/fit functions.
Notes
-----
Various algorithm parameters and their default values:
ONMF:
lambda1=1,
kappa=1,
robust=False,
store_r=False
batch_size=None
ORPCA:
fast=True,
lambda1=None,
lambda2=None,
method=None,
learning_rate=None,
init=None,
training_samples=None,
momentum=None
PCA:
batch_size=None,
copy=True,
white=False
"""
if bounds:
msg = (
"The `bounds` keyword is deprecated and will be removed "
"in v2.0. Since version > 1.3 this has no effect.")
warnings.warn(msg, VisibleDeprecationWarning)
explained_variance = None
explained_variance_ratio = None
_al_data = self._data_aligned_with_axes
nav_chunks = _al_data.chunks[:self.axes_manager.navigation_dimension]
sig_chunks = _al_data.chunks[self.axes_manager.navigation_dimension:]
num_chunks = 1 if num_chunks is None else num_chunks
blocksize = np.min([multiply(ar) for ar in product(*nav_chunks)])
nblocks = multiply([len(c) for c in nav_chunks])
if algorithm != "svd" and output_dimension is None:
raise ValueError("With the %s the output_dimension "
"must be specified" % algorithm)
if output_dimension and blocksize / output_dimension < num_chunks:
num_chunks = np.ceil(blocksize / output_dimension)
blocksize *= num_chunks
# LEARN
if algorithm == 'PCA':
from sklearn.decomposition import IncrementalPCA
obj = IncrementalPCA(n_components=output_dimension)
method = partial(obj.partial_fit, **kwargs)
reproject = True
elif algorithm == 'ORPCA':
from hyperspy.learn.rpca import ORPCA
kwg = {'fast': True}
kwg.update(kwargs)
obj = ORPCA(output_dimension, **kwg)
method = partial(obj.fit, iterating=True)
elif algorithm == 'ONMF':
from hyperspy.learn.onmf import ONMF
batch_size = kwargs.pop('batch_size', None)
obj = ONMF(output_dimension, **kwargs)
method = partial(obj.fit, batch_size=batch_size)
elif algorithm != "svd":
raise ValueError('algorithm not known')
original_data = self.data
try:
if normalize_poissonian_noise:
data = self._data_aligned_with_axes
ndim = self.axes_manager.navigation_dimension
sdim = self.axes_manager.signal_dimension
nm = da.logical_not(
da.zeros(
self.axes_manager.navigation_shape[::-1],
chunks=nav_chunks)
if navigation_mask is None else to_array(
navigation_mask, chunks=nav_chunks))
sm = da.logical_not(
da.zeros(
self.axes_manager.signal_shape[::-1],
chunks=sig_chunks)
if signal_mask is None else to_array(
signal_mask, chunks=sig_chunks))
ndim = self.axes_manager.navigation_dimension
sdim = self.axes_manager.signal_dimension
bH, aG = da.compute(
data.sum(axis=range(ndim)),
data.sum(axis=range(ndim, ndim + sdim)))
bH = da.where(sm, bH, 1)
aG = da.where(nm, aG, 1)
raG = da.sqrt(aG)
rbH = da.sqrt(bH)
coeff = raG[(..., ) + (None, ) * rbH.ndim] *\
rbH[(None, ) * raG.ndim + (...,)]
coeff.map_blocks(np.nan_to_num)
coeff = da.where(coeff == 0, 1, coeff)
data = data / coeff
self.data = data
# LEARN
if algorithm == "svd":
reproject = False
from dask.array.linalg import svd
try:
self._unfolded4decomposition = self.unfold()
# TODO: implement masking
if navigation_mask or signal_mask:
                        raise NotImplementedError(
"Masking is not yet implemented for lazy SVD."
)
U, S, V = svd(self.data)
factors = V.T
explained_variance = S ** 2 / self.data.shape[0]
loadings = U * S
finally:
if self._unfolded4decomposition is True:
self.fold()
                        self._unfolded4decomposition = False
else:
this_data = []
try:
for chunk in progressbar(
self._block_iterator(
flat_signal=True,
get=get,
signal_mask=signal_mask,
navigation_mask=navigation_mask),
total=nblocks,
leave=True,
desc='Learn'):
this_data.append(chunk)
if len(this_data) == num_chunks:
thedata = np.concatenate(this_data, axis=0)
method(thedata)
this_data = []
if len(this_data):
thedata = np.concatenate(this_data, axis=0)
method(thedata)
except KeyboardInterrupt:
pass
# GET ALREADY CALCULATED RESULTS
if algorithm == 'PCA':
explained_variance = obj.explained_variance_
explained_variance_ratio = obj.explained_variance_ratio_
factors = obj.components_.T
elif algorithm == 'ORPCA':
_, _, U, S, V = obj.finish()
factors = U * S
loadings = V
explained_variance = S**2 / len(factors)
elif algorithm == 'ONMF':
factors, loadings = obj.finish()
loadings = loadings.T
# REPROJECT
if reproject:
if algorithm == 'PCA':
method = obj.transform
def post(a): return np.concatenate(a, axis=0)
elif algorithm == 'ORPCA':
method = obj.project
obj.R = []
def post(a): return obj.finish()[4]
elif algorithm == 'ONMF':
method = obj.project
def post(a): return np.concatenate(a, axis=1).T
_map = map(lambda thing: method(thing),
self._block_iterator(
flat_signal=True,
get=get,
signal_mask=signal_mask,
navigation_mask=navigation_mask))
H = []
try:
for thing in progressbar(
_map, total=nblocks, desc='Project'):
H.append(thing)
except KeyboardInterrupt:
pass
loadings = post(H)
if explained_variance is not None and \
explained_variance_ratio is None:
explained_variance_ratio = \
explained_variance / explained_variance.sum()
# RESHUFFLE "blocked" LOADINGS
ndim = self.axes_manager.navigation_dimension
if algorithm != "svd": # Only needed for online algorithms
try:
loadings = _reshuffle_mixed_blocks(
loadings,
ndim,
(output_dimension,),
nav_chunks).reshape((-1, output_dimension))
except ValueError:
# In case the projection step was not finished, it's left
# as scrambled
pass
finally:
self.data = original_data
target = self.learning_results
target.decomposition_algorithm = algorithm
target.output_dimension = output_dimension
if algorithm != "svd":
target._object = obj
target.factors = factors
target.loadings = loadings
target.explained_variance = explained_variance
target.explained_variance_ratio = explained_variance_ratio
# Rescale the results if the noise was normalized
if normalize_poissonian_noise is True:
target.factors = target.factors * rbH.ravel()[:, np.newaxis]
target.loadings = target.loadings * raG.ravel()[:, np.newaxis]
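# Hedged usage sketch for the lazy ``decomposition`` above (``s`` is a
# hypothetical lazy signal; parameter values are illustrative, not
# recommendations):
#
#     s.decomposition(algorithm='PCA', output_dimension=10,
#                     normalize_poissonian_noise=True)
#     factors = s.learning_results.factors
#     loadings = s.learning_results.loadings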
def _reshuffle_mixed_blocks(array, ndim, sshape, nav_chunks):
"""Reshuffles dask block-shuffled array
Parameters
----------
array : np.ndarray
the array to reshuffle
ndim : int
the number of navigation (shuffled) dimensions
sshape : tuple of ints
        The shape of the signal (non-navigation) dimensions
    nav_chunks : tuple of tuples
        The dask chunks of the navigation dimensions of the original array
    """
splits = np.cumsum([multiply(ar)
for ar in product(*nav_chunks)][:-1]).tolist()
if splits:
all_chunks = [
ar.reshape(shape + sshape)
for shape, ar in zip(
product(*nav_chunks), np.split(array, splits))
]
def split_stack_list(what, step, axis):
total = len(what)
if total != step:
return [
np.concatenate(
what[i:i + step], axis=axis)
for i in range(0, total, step)
]
else:
return np.concatenate(what, axis=axis)
for chunks, axis in zip(nav_chunks[::-1], range(ndim - 1, -1, -1)):
step = len(chunks)
all_chunks = split_stack_list(all_chunks, step, axis)
return all_chunks
else:
return array
|
magnunor/hyperspy
|
hyperspy/_signals/lazy.py
|
Python
|
gpl-3.0
| 36,884
|
[
"Gaussian"
] |
9822532fc6cb674a660e1ab6684742f663c0351267a98595b2f0ac419e17a564
|
from os.path import exists
import numpy as np
import pandas as pd
import pygrib
from pyproj import Proj
from scipy.ndimage import gaussian_filter
from scipy.spatial import cKDTree
class HailForecastGrid(object):
"""
HailForecastGrid loads and stores gridded machine learning hail forecasts from GRIB2 files. It can load
an arbitrary number of members and timesteps at once.
Attributes:
run_date (datetime.datetime): Date of the initial time of the model run
start_date (datetime.datetime): Date of the initial forecast time being loaded
end_date (datetime.datetime): Date of the final forecast time being loaded
forecast_dates (pandas.DatetimeIndex): All forecast times
ensemble_name (str): Name of the NWP ensemble being used
ml_model (str): Name of the machine learning model being loaded
variable (str): Name of the machine learning model variable being forecast
message_number (int): Field in the GRIB2 file to load. The first field in the file has message number 1.
path (str): Path to top-level GRIB2 directory. Assumes files are stored in directories by run_date
data (ndarray): Hail forecast data with dimensions (member, time, y, x)
lon (ndarray): 2D array of longitudes
lat (ndarray): 2D array of latitudes
x (ndarray): 2d array of x-coordinate values in km
y (ndarray): 2d array of y-coordinate values in km
i (ndarray): 2d array of row indices
j (ndarray): 2d array of column indices
dx (float): distance between grid points
proj (Proj): a pyproj projection object used for converting lat-lon points to x-y coordinate values
projparams (dict): PROJ4 parameters describing map projection
"""
def __init__(self, run_date, start_date, end_date, ensemble_name, ml_model, members,
variable, message_number, path):
self.run_date = run_date
self.start_date = start_date
self.end_date = end_date
self.forecast_dates = pd.date_range(start=self.start_date, end=self.end_date, freq="1H")
self.ensemble_name = ensemble_name
self.ml_model = ml_model
self.members = members
self.variable = variable
self.message_number = message_number
self.path = path
self.data = None
self.lon = None
self.lat = None
self.x = None
self.y = None
self.i = None
self.j = None
self.dx = None
self.proj = None
self.projparams = None
return
def load_data(self):
for m, member in enumerate(self.members):
for f, forecast_date in enumerate(self.forecast_dates.to_pydatetime()):
dt = int((forecast_date - self.run_date).total_seconds() / 3600)
filename_args = (self.ensemble_name, member, self.ml_model, self.variable,
forecast_date.strftime("%Y%m%d%H%M"))
filename = self.path + self.run_date.strftime("%Y%m%d") + \
"/{0}_{1}_{2}_{3}_{4}.grib2".format(*filename_args)
if not exists(filename):
filename_args = (self.ensemble_name, member, self.ml_model, self.variable,
self.run_date.strftime("%Y%m%d%H") + "f{0:02d}".format(dt))
filename = self.path + self.run_date.strftime("%Y%m%d") + \
"/{0}_{1}_{2}_{3}_{4}.grib2".format(*filename_args)
if not exists(filename):
continue
grbs = pygrib.open(filename)
if self.lon is None:
self.lat, self.lon = grbs[self.message_number].latlons()
self.projparams = grbs[self.message_number].projparams
self.proj = Proj(grbs[self.message_number].projparams)
self.x, self.y = self.proj(self.lon, self.lat)
self.x /= 1000.0
self.y /= 1000.0
self.dx = grbs[self.message_number]['DxInMetres'] / 1000.0
self.i, self.j = np.indices(self.lon.shape)
data = grbs[self.message_number].values
data *= 1000.0
if self.data is None:
self.data = np.empty((len(self.members), len(self.forecast_dates),
data.shape[0], data.shape[1]), dtype=float)
self.data[m, f] = data.filled(0)
grbs.close()
return
def period_neighborhood_probability(self, radius, smoothing, threshold, stride, start_time, end_time):
"""
Calculate the neighborhood probability over the full period of the forecast
        Args:
            radius: circular neighborhood radius from each point in km
            smoothing: width of the Gaussian smoother in km
            threshold: intensity threshold for exceedance
            stride: number of grid points to skip for the reduced neighborhood grid
            start_time: first forecast hour (time index) of the period
            end_time: last forecast hour (time index, exclusive) of the period
        Returns:
            neighbor_prob (ndarray): neighborhood probabilities with dimensions (member, y, x)
        """
neighbor_x = self.x[::stride, ::stride]
neighbor_y = self.y[::stride, ::stride]
neighbor_kd_tree = cKDTree(np.vstack((neighbor_x.ravel(), neighbor_y.ravel())).T)
neighbor_prob = np.zeros((self.data.shape[0], neighbor_x.shape[0], neighbor_x.shape[1]))
print('Forecast Hours: {0}-{1}'.format(start_time, end_time))
for m in range(len(self.members)):
period_max = self.data[m, start_time:end_time, :, :].max(axis=0)
valid_i, valid_j = np.where(period_max >= threshold)
print(self.members[m], len(valid_i))
if len(valid_i) > 0:
var_kd_tree = cKDTree(np.vstack((self.x[valid_i, valid_j], self.y[valid_i, valid_j])).T)
exceed_points = np.unique(np.concatenate(var_kd_tree.query_ball_tree(neighbor_kd_tree, radius))).astype(
int)
exceed_i, exceed_j = np.unravel_index(exceed_points, neighbor_x.shape)
neighbor_prob[m][exceed_i, exceed_j] = 1
if smoothing > 0:
neighbor_prob[m] = gaussian_filter(neighbor_prob[m], smoothing, mode='constant')
return neighbor_prob
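    # Hedged usage sketch (dates, member names, variable and path are made up
    # for illustration; they are not part of the original module):
    #
    #     import datetime
    #     grid = HailForecastGrid(run_date=datetime.datetime(2017, 5, 1, 0),
    #                             start_date=datetime.datetime(2017, 5, 1, 12),
    #                             end_date=datetime.datetime(2017, 5, 2, 0),
    #                             ensemble_name="NCAR", ml_model="Random-Forest",
    #                             members=["mem1"], variable="hail",
    #                             message_number=1, path="/path/to/grib/")
    #     grid.load_data()
    #     probs = grid.period_neighborhood_probability(radius=40, smoothing=14,
    #                                                  threshold=25, stride=4,
    #                                                  start_time=0, end_time=12)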
|
djgagne/hagelslag
|
hagelslag/data/HailForecastGrid.py
|
Python
|
mit
| 6,310
|
[
"Gaussian"
] |
5221949e61904b30020fbbcb06a189fe84b7c60ecea81f5868b8746ed1a0547f
|
from argparse import *
from sys import *
from os.path import *
from vtk import *
from paraview.simple import *
from paraview.servermanager import *
parser = ArgumentParser(description = 'Process the arguments')
parser.add_argument('vtkFile', help = 'Path to the VTK file')
parser.add_argument('outputDir', help = 'Path to the output directory (must be writable)')
args = parser.parse_args()
vtkReader = OpenDataFile(args.vtkFile)
frameInstance = Fetch(vtkReader)
frameRange = frameInstance.GetPointData().GetArray(0).GetRange()
vtkImgCast = vtkImageShiftScale()
vtkImgCast.SetShift(-frameRange[0])
vtkImgCast.SetScale(1.0 / (frameRange[1] - frameRange[0]) )
vtkImgCast.SetOutputScalarTypeToFloat()
vtkImgCast.SetInputData(frameInstance)
vtkImgCast.Update()
vtkImgWriter = vtkTIFFWriter()
vtkImgWriter.SetCompressionToNoCompression()
outputFilePrefix = splitext(basename(args.vtkFile) )[0]
fileName = args.outputDir + '/' + outputFilePrefix + '.tiff'
vtkImgWriter.SetFileName(fileName)
vtkImgWriter.SetInputConnection(vtkImgCast.GetOutputPort() )
vtkImgWriter.Write()
print('OK: ' + fileName)
stdout.flush()
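# Hedged invocation sketch (paths are placeholders, not part of the original
# script): run with ParaView's Python interpreter so the paraview.simple
# imports resolve, e.g.
#     pvpython VTKDEM2TIFF.py /data/frame_0001.vtk /tmp/tiff_output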
|
fercook/SciViz
|
Voxels/NETCDF_2_VTK_2_Blender/job/dem/paraview/VTKDEM2TIFF.py
|
Python
|
gpl-2.0
| 1,106
|
[
"ParaView",
"VTK"
] |
caf642fb3740c1deafa529ac7620b58783eaf1806287b6cf6e102a1b5843ae7c
|
# Author: Kyle A. Beauchamp <kyleabeauchamp@gmail.com>
# Contributors: Robert McGibbon <rmcgibbo@gmail.com>,
# Matthew Harrigan <matthew.p.harrigan@gmail.com>
# Brooke Husic <brookehusic@gmail.com>,
# Muneeb Sultan <msultan@stanford.edu>
# Copyright (c) 2016, Stanford University and the Authors
# All rights reserved.
from __future__ import print_function, division, absolute_import
import warnings
import mdtraj as md
import numpy as np
import sklearn.pipeline
from scipy.stats import vonmises as vm
from msmbuilder import libdistance
from msmbuilder.utils import unique
import itertools
import inspect
from sklearn.base import TransformerMixin
from sklearn.externals.joblib import Parallel, delayed
from ..base import BaseEstimator
def zippy_maker(aind_tuples, top):
resseqs = []
resids = []
resnames = []
for ainds in aind_tuples:
resid = unique([top.atom(ai).residue.index for ai in ainds])
resids += [list(resid)]
reseq = unique([top.atom(ai).residue.resSeq for ai in ainds])
resseqs += [list(reseq)]
resname = unique([top.atom(ai).residue.name for ai in ainds])
resnames += [list(resname)]
return zip(aind_tuples, resseqs, resids, resnames)
def dict_maker(zippy):
feature_descs = []
for featurizer, featuregroup, other_info, feature_info in zippy:
ainds, resseq, resid, resname = feature_info
feature_descs += [dict(
resnames=resname,
atominds=ainds,
resseqs=resseq,
resids=resid,
featurizer=featurizer,
featuregroup="{}".format(featuregroup),
otherinfo ="{}".format(other_info)
)]
return feature_descs
def featurize_all(filenames, featurizer, topology, chunk=1000, stride=1):
"""Load and featurize many trajectory files.
Parameters
----------
filenames : list of strings
List of paths to MD trajectory files
featurizer : Featurizer
The featurizer to be invoked on each trajectory trajectory as
it is loaded
topology : str, Topology, Trajectory
Topology or path to a topology file, used to load trajectories with
MDTraj
chunk : {int, None}
If chunk is an int, load the trajectories up in chunks using
md.iterload for better memory efficiency (less trajectory data needs
to be in memory at once)
stride : int, default=1
Only read every stride-th frame.
Returns
-------
data : np.ndarray, shape=(total_length_of_all_trajectories, n_features)
indices : np.ndarray, shape=(total_length_of_all_trajectories)
fns : np.ndarray shape=(total_length_of_all_trajectories)
These three arrays all share the same indexing, such that data[i] is
the featurized version of indices[i]-th frame in the MD trajectory
with filename fns[i].
"""
data = []
indices = []
fns = []
for file in filenames:
kwargs = {} if file.endswith('.h5') else {'top': topology}
count = 0
for t in md.iterload(file, chunk=chunk, stride=stride, **kwargs):
x = featurizer.partial_transform(t)
n_frames = len(x)
data.append(x)
indices.append(count + (stride * np.arange(n_frames)))
fns.extend([file] * n_frames)
count += (stride * n_frames)
if len(data) == 0:
raise ValueError("None!")
return np.concatenate(data), np.concatenate(indices), np.array(fns)
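# Hedged usage sketch for ``featurize_all`` (file names and topology are
# placeholders; the featurizer is e.g. the DihedralFeaturizer defined below):
#
#     feat = DihedralFeaturizer(types=['phi', 'psi'])
#     data, indices, fns = featurize_all(['traj0.xtc', 'traj1.xtc'], feat,
#                                        topology='top.pdb', stride=10)
#     # data[i] is the featurized frame indices[i] of the trajectory fns[i]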
class Featurizer(BaseEstimator, TransformerMixin):
"""Base class for objects that featurize Trajectories.
Notes
-----
At the bare minimum, a featurizer must implement the `partial_transform(traj)`
member function. A `transform(traj_list)` for featurizing multiple
trajectories in batch will be provided.
"""
def __init__(self):
pass
def featurize(self, traj):
raise NotImplementedError('This API was removed. Use partial_transform instead')
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space.
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
pass
def fit(self, traj_list, y=None):
return self
def transform(self, traj_list, y=None):
"""Featurize a several trajectories.
Parameters
----------
traj_list : list(mdtraj.Trajectory)
Trajectories to be featurized.
Returns
-------
features : list(np.ndarray), length = len(traj_list)
The featurized trajectories. features[i] is the featurized
version of traj_list[i] and has shape
(n_samples_i, n_features)
"""
return [self.partial_transform(traj) for traj in traj_list]
def describe_features(self, traj):
"""Generic method for describing features.
Parameters
----------
traj : mdtraj.Trajectory
Trajectory to use
Returns
-------
feature_descs : list of dict
Dictionary describing each feature with the following information
about the atoms participating in each feature
- resnames: unique names of residues
            - atominds: the atom indices
- resseqs: unique residue sequence ids (not necessarily
0-indexed)
- resids: unique residue ids (0-indexed)
- featurizer: Featurizer name
- featuregroup: Other information
Notes
        -----
        This method returns "N/A" for everything if describe_features is not
        implemented in the subclass.
        """
n_f = self.partial_transform(traj).shape[1]
zippy=zip(itertools.repeat("N/A", n_f),
itertools.repeat("N/A", n_f),
itertools.repeat("N/A", n_f),
itertools.repeat(("N/A","N/A","N/A","N/A"), n_f))
return dict_maker(zippy)
class SuperposeFeaturizer(Featurizer):
"""Featurizer based on euclidian atom distances to reference structure.
This featurizer transforms a dataset containing MD trajectories into
a vector dataset by representing each frame in each of the MD trajectories
by a vector containing the distances from a specified set of atoms to
the 'reference position' of those atoms, in ``reference_traj``.
Parameters
----------
atom_indices : np.ndarray, shape=(n_atoms,), dtype=int
The indices of the atoms to superpose and compute the distances with
reference_traj : md.Trajectory
The reference conformation to superpose each frame with respect to
(only the first frame in reference_traj is used)
superpose_atom_indices : np.ndarray, shape=(n_atoms,), dtype=int
If not None, these atom_indices are used for the superposition
"""
def __init__(self, atom_indices, reference_traj, superpose_atom_indices=None):
self.atom_indices = atom_indices
if superpose_atom_indices is None:
self.superpose_atom_indices = atom_indices
else:
self.superpose_atom_indices = superpose_atom_indices
self.reference_traj = reference_traj
self.n_features = len(self.atom_indices)
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space via distance
after superposition
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
traj.superpose(self.reference_traj,
atom_indices=self.superpose_atom_indices)
diff2 = (traj.xyz[:, self.atom_indices] -
self.reference_traj.xyz[0, self.atom_indices]) ** 2
x = np.sqrt(np.sum(diff2, axis=2))
return x
class RMSDFeaturizer(Featurizer):
"""Featurizer based on RMSD to one or more reference structures.
This featurizer inputs a trajectory to be analyzed ('traj') and a
reference trajectory ('ref') and outputs the RMSD of each frame of
traj with respect to each frame in ref. The output is a numpy array
with n_rows = traj.n_frames and n_columns = ref.n_frames.
Parameters
----------
reference_traj : md.Trajectory
The reference conformations to superpose each frame with respect to
atom_indices : np.ndarray, shape=(n_atoms,), dtype=int
The indices of the atoms to superpose and compute the distances with.
If not specified, all atoms are used.
trj0
Deprecated. Please use reference_traj.
"""
def __init__(self, reference_traj=None, atom_indices=None, trj0=None):
if trj0 is not None:
warnings.warn("trj0 is deprecated. Please use reference_traj",
DeprecationWarning)
reference_traj = trj0
else:
if reference_traj is None:
raise ValueError("Please specify a reference trajectory")
self.atom_indices = atom_indices
if self.atom_indices is not None:
self.sliced_reference_traj = reference_traj.atom_slice(self.atom_indices)
else:
self.sliced_reference_traj = reference_traj
            self.atom_indices = [i for i in range(self.sliced_reference_traj.n_atoms)]
def _transform(self, value):
return value
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space via distance
after superposition
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, shape=(n_frames, n_ref_frames)
The RMSD value of each frame of the input trajectory to be
featurized versus each frame in the reference trajectory. The
number of features is the number of reference frames.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
if self.atom_indices is not None:
sliced_traj = traj.atom_slice(self.atom_indices)
else:
sliced_traj = traj
result = libdistance.cdist(
sliced_traj, self.sliced_reference_traj, 'rmsd'
)
return self._transform(result)
class LandMarkRMSDFeaturizer(RMSDFeaturizer):
"""Kernel Landmark Featuizer based on RMSD to one or more reference structures.
This featurizer inputs a trajectory to be analyzed ('traj') and a
reference trajectory ('ref') and outputs the kernelized
RMSD of each frame of traj with respect to each frame in ref.
The output is a numpy array with n_rows = traj.n_frames
    and n_columns = ref.n_frames. This uses an exponential/Gaussian
    kernel.
Parameters
----------
reference_traj : md.Trajectory
The reference conformations to superpose each frame with respect to
atom_indices : np.ndarray, shape=(n_atoms,), dtype=int
The indices of the atoms to superpose and compute the distances with.
If not specified, all atoms are used.
sigma: np.float , dtype=float
The kernel width to use. Defaults to 0.3nm
"""
def __init__(self, reference_traj=None, atom_indices=None, sigma=0.3):
super(LandMarkRMSDFeaturizer, self).__init__(reference_traj,atom_indices)
self.sigma = sigma
def _transform(self, value):
return np.exp(-value**2/(2* self.sigma **2))
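    # Worked example of the Gaussian kernel above (illustrative numbers): with
    # sigma = 0.3 nm, an RMSD of 0.3 nm maps to exp(-0.5) ~ 0.61 and an RMSD
    # of 0.9 nm maps to exp(-4.5) ~ 0.011, so frames far from every landmark
    # contribute features close to zero.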
def describe_features(self, traj):
"""Return a list of dictionaries describing the LandmarkRMSD features.
Parameters
----------
traj : mdtraj.Trajectory
The trajectory to describe
Returns
-------
feature_descs : list of dict
Dictionary describing each feature with the following information
about the atoms participating in each feature
- resnames: unique names of residues
            - atominds: the atom indices
            - resseqs: unique residue sequence ids (not necessarily
            0-indexed)
            - resids: unique residue ids (0-indexed)
            - featurizer: LandMarkFeaturizer
            - featuregroup: RMSD
            - otherinfo: the kernel width sigma
"""
feature_descs = []
# fill in the atom indices using just the first frame
self.partial_transform(traj[0])
top = traj.topology
aind_tuples = [self.atom_indices for _ in range(self.sliced_reference_traj.n_frames)]
zippy = zippy_maker(aind_tuples, top)
zippy = itertools.product(["LandMarkFeaturizer"], ["RMSD"], [self.sigma], zippy)
feature_descs.extend(dict_maker(zippy))
return feature_descs
class AtomPairsFeaturizer(Featurizer):
"""Featurizer based on distances between specified pairs of atoms.
This featurizer transforms a dataset containing MD trajectories into
a vector dataset by representing each frame in each of the MD trajectories
by a vector of the distances between the specified pairs of atoms.
Parameters
----------
pair_indices : np.ndarray, shape=(n_pairs, 2), dtype=int
Each row gives the indices of two atoms involved in the interaction.
periodic : bool, default=False
If `periodic` is True and the trajectory contains unitcell
information, we will compute distances under the minimum image
convention.
exponent : float
Modify the distances by raising them to this exponent.
"""
def __init__(self, pair_indices, periodic=False, exponent=1.):
# TODO: We might want to implement more error checking here. Or during
# featurize(). E.g. are the pair_indices supplied valid?
self.pair_indices = pair_indices
self.atom_indices = pair_indices
self.n_features = len(self.pair_indices)
self.periodic = periodic
self.exponent = exponent
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space via pairwise
atom-atom distances
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
d = md.geometry.compute_distances(traj, self.pair_indices,
periodic=self.periodic)
return d ** self.exponent
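    # Hedged usage sketch (atom indices and ``traj`` are made-up placeholders):
    #
    #     pairs = [[0, 10], [0, 20], [10, 20]]
    #     feat = AtomPairsFeaturizer(pairs, periodic=True, exponent=-1.)
    #     X = feat.partial_transform(traj)  # shape (n_frames, 3); inverse distances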
def describe_features(self, traj):
"""Return a list of dictionaries describing the atom pair features.
Parameters
----------
traj : mdtraj.Trajectory
The trajectory to describe
Returns
-------
feature_descs : list of dict
Dictionary describing each feature with the following information
about the atoms participating in each dihedral
- resnames: unique names of residues
- atominds: the two atom inds
- resseqs: unique residue sequence ids (not necessarily
0-indexed)
- resids: unique residue ids (0-indexed)
- featurizer: AtomPairsFeaturizer
- featuregroup: Distance.
- other info : Value of the exponent
"""
feature_descs = []
top = traj.topology
residue_indices = [[top.atom(i[0]).residue.index, top.atom(i[1]).residue.index] \
for i in self.atom_indices]
aind = []
resseqs = []
resnames = []
for ind,resid_ids in enumerate(residue_indices):
aind += [[i for i in self.atom_indices[ind]]]
resseqs += [[top.residue(ri).resSeq for ri in resid_ids]]
resnames += [[top.residue(ri).name for ri in resid_ids]]
zippy = itertools.product(["AtomPairs"], ["Distance"],
["Exponent {}".format(self.exponent)],
zip(aind, resseqs, residue_indices, resnames))
feature_descs.extend(dict_maker(zippy))
return feature_descs
class FunctionFeaturizer(Featurizer):
"""Featurizer based on arbitrary functions.
This featurizer transforms a dataset containing MD trajectories into
a vector dataset by representing each frame in each of the MD trajectories
by a vector the output of the function.
Parameters
----------
function : function
Instantiation of the function. The function should accept
a mdtraj.Trajectory object as the first argument.
func_args : dictionary
A dictionary of key word arguments(keys) and their values to
pass to the function. These should NOT include the trajectory
object which is passed in as the first argument.
Notes
----------
This Featurizer assumes that the function takes in the trajectory object
as the first argument.
Examples
--------
>>> function = compute_dihedrals
    >>> f = FunctionFeaturizer(function, func_args={'indices': [[0,1,2,3]]})
>>> results = f.transform(dataset)
"""
def __init__(self, function, func_args={}):
if callable(function):
self.function = function
self.func_args = func_args
else:
raise ValueError("Sorry but we "
"couldn't use the "
"provided function "
"because it is not "
"callable")
def partial_transform(self, traj):
"""Featurize an MD trajectory using the provided function.
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
Notes
-----
This method assumes that the function takes in the trajectory object
as the first argument.
"""
return self.function(traj, **self.func_args)
class DihedralFeaturizer(Featurizer):
"""Featurizer based on dihedral angles.
This featurizer transforms a dataset containing MD trajectories into
a vector dataset by representing each frame in each of the MD trajectories
by a vector containing one or more of the backbone or side-chain dihedral
angles, or the sin and cosine of these angles.
Parameters
----------
types : list
One or more of ['phi', 'psi', 'omega', 'chi1', 'chi2', 'chi3', 'chi4']
sincos : bool
Instead of outputting the angle, return the sine and cosine of the
angle as separate features.
"""
def __init__(self, types=['phi', 'psi'], sincos=True):
if isinstance(types, str):
types = [types]
self.types = list(types) # force a copy
self.sincos = sincos
known = {'phi', 'psi', 'omega', 'chi1', 'chi2', 'chi3', 'chi4'}
if not set(types).issubset(known):
raise ValueError('angles must be a subset of %s. you supplied %s' %
(str(known), str(types)))
def describe_features(self, traj):
"""Return a list of dictionaries describing the dihderal features.
Parameters
----------
traj : mdtraj.Trajectory
The trajectory to describe
Returns
-------
feature_descs : list of dict
Dictionary describing each feature with the following information
about the atoms participating in each dihedral
- resnames: unique names of residues
            - atominds: the four atom indices
- resseqs: unique residue sequence ids (not necessarily
0-indexed)
- resids: unique residue ids (0-indexed)
- featurizer: Dihedral
- featuregroup: the type of dihedral angle and whether sin or
cos has been applied.
"""
feature_descs = []
for dihed_type in self.types:
# TODO: Don't recompute dihedrals, just get the indices
func = getattr(md, 'compute_%s' % dihed_type)
# ainds is a list of four-tuples of atoms participating
# in each dihedral
aind_tuples, _ = func(traj)
top = traj.topology
zippy = zippy_maker(aind_tuples, top)
if self.sincos:
zippy = itertools.product(['Dihedral'],[dihed_type], ['sin', 'cos'], zippy)
else:
zippy = itertools.product(['Dihedral'],[dihed_type], ['nosincos'], zippy)
feature_descs.extend(dict_maker(zippy))
return feature_descs
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space via calculation
of dihedral (torsion) angles
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
x = []
for a in self.types:
func = getattr(md, 'compute_%s' % a)
_, y = func(traj)
if self.sincos:
x.extend([np.sin(y), np.cos(y)])
else:
x.append(y)
return np.hstack(x)
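    # Hedged usage sketch ('traj.pdb' is a placeholder file name):
    #
    #     feat = DihedralFeaturizer(types=['phi', 'psi'], sincos=True)
    #     X = feat.partial_transform(md.load('traj.pdb'))
    #     # X has shape (n_frames, 2 * (n_phi + n_psi)) because sin and cos
    #     # are emitted for every angle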
class VonMisesFeaturizer(Featurizer):
"""Featurizer based on dihedral angles soft-binned along the unit circle.
This featurizer transforms a dataset containing MD trajectories into
a vector dataset by representing each frame in each of the MD trajectories
as a vector containing n soft-bins for each dihedral angle. Soft-bins are
computed by arranging n equal-spaced von Mises distributions along the unit
circle and using the PDF of those distributions to define the bin value.
Parameters
----------
types : list
One or more of ['phi', 'psi', 'omega', 'chi1', 'chi2', 'chi3', 'chi4']
n_bins : int
Number of von Mises distributions to be used.
kappa : int or float
Shape parameter for the von Mises distributions.
"""
def __init__(self, types=['phi', 'psi'], n_bins=18, kappa=20.):
if isinstance(types, str):
types = [types]
self.types = list(types) # force a copy
if not isinstance(n_bins, int):
            raise TypeError('n_bins must be of type int.')
if not isinstance(kappa, (int, float)):
raise TypeError('kappa must be numeric.')
self._n_bins = n_bins
self.loc = np.linspace(0, 2*np.pi, self.n_bins)
self.kappa = kappa
known = {'phi', 'psi', 'omega', 'chi1', 'chi2', 'chi3', 'chi4'}
if not set(types).issubset(known):
raise ValueError('angles must be a subset of %s. you supplied %s' %
(str(known), str(types)))
@property
def n_bins(self):
return self._n_bins
@n_bins.setter
def n_bins(self, x):
self._n_bins = x
self.loc = np.linspace(0, 2*np.pi, self.n_bins)
def describe_features(self, traj):
"""Return a list of dictionaries describing the dihderal features.
Parameters
----------
traj : mdtraj.Trajectory
The trajectory to describe
Returns
-------
feature_descs : list of dict
Dictionary describing each feature with the following information
about the atoms participating in each dihedral
- resnames: unique names of residues
            - atominds: the four atom indices
- resseqs: unique residue sequence ids (not necessarily
0-indexed)
- resids: unique residue ids (0-indexed)
            - featurizer: VonMises
            - featuregroup: the dihedral type (phi/psi/chi1 etc.)
            - otherinfo: the bin index (0..n_bins-1)
"""
feature_descs = []
for dihed_type in self.types:
# TODO: Don't recompute dihedrals, just get the indices
func = getattr(md, 'compute_%s' % dihed_type)
# ainds is a list of four-tuples of atoms participating
# in each dihedral
aind_tuples, _ = func(traj)
top = traj.topology
bin_info =[]
resseqs = []
resids = []
resnames = []
all_aind = []
            # descriptions are ordered bin0 for all dihedrals of this type,
            # then bin1 for all dihedrals, and so on
for bin_index in range(self.n_bins):
for ainds in aind_tuples:
resid = set(top.atom(ai).residue.index for ai in ainds)
all_aind.append(ainds)
bin_info += ["bin-%d"%bin_index]
resids += [list(resid)]
reseq = set(top.atom(ai).residue.resSeq for ai in ainds)
resseqs += [list(reseq)]
resname = set(top.atom(ai).residue.name for ai in ainds)
resnames += [list(resname)]
zippy = zip(all_aind, resseqs, resids, resnames)
#fast check to make sure we have the right number of features
assert len(bin_info) == len(aind_tuples) * self.n_bins
zippy = zip(["VonMises"]*len(bin_info),
[dihed_type]*len(bin_info),
bin_info,
zippy)
feature_descs.extend(dict_maker(zippy))
return feature_descs
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space via calculation
of soft-bins over dihdral angle space.
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
x = []
for a in self.types:
func = getattr(md, 'compute_%s' % a)
_, y = func(traj)
res = vm.pdf(y[..., np.newaxis],
loc=self.loc, kappa=self.kappa)
        # we reshape the results using a Fortran-like index order,
        # so that it goes over the columns first. This orders the features as
        # phi dihedrals (all bin0, then all bin1, ...), then psi dihedrals, etc.
x.extend(np.reshape(res, (1, -1, self.n_bins*y.shape[1]), order='F'))
return np.hstack(x)
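    # Worked shape example (a sketch; the dihedral counts are made up): with
    # types=['phi', 'psi'], n_bins=18 and a protein having 10 phi and 10 psi
    # dihedrals, each frame yields 18 * 10 = 180 phi features followed by 180
    # psi features, i.e. n_features = n_bins * n_dihedrals summed over the
    # requested types (360 here).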
class AlphaAngleFeaturizer(Featurizer):
"""Featurizer to extract alpha (dihedral) angles.
The alpha angle of residue `i` is the dihedral formed by the four CA atoms
of residues `i-1`, `i`, `i+1` and `i+2`.
Parameters
----------
sincos : bool
Instead of outputting the angle, return the sine and cosine of the
angle as separate features.
"""
def __init__(self, sincos=True):
self.sincos = sincos
self.atom_indices = None
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space via calculation
of dihedral (torsion) angles of alpha carbon backbone
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
"""
ca = [a.index for a in traj.top.atoms if a.name == 'CA']
if len(ca) < 4:
return np.zeros((len(traj), 0), dtype=np.float32)
alpha_indices = np.array(
[(ca[i - 1], ca[i], ca[i + 1], ca[i + 2]) for i in range(1, len(ca) - 2)])
result = md.compute_dihedrals(traj, alpha_indices)
x = []
if self.atom_indices is None:
self.atom_indices = np.vstack(alpha_indices)
if self.sincos:
x.extend([np.cos(result), np.sin(result)])
else:
x.append(result)
return np.hstack(x)
def describe_features(self, traj):
"""Return a list of dictionaries describing the dihderal features.
Parameters
----------
traj : mdtraj.Trajectory
The trajectory to describe
Returns
-------
feature_descs : list of dict
Dictionary describing each feature with the following information
about the atoms participating in each dihedral
- resnames: unique names of residues
            - atominds: the four atom indices
- resseqs: unique residue sequence ids (not necessarily
0-indexed)
- resids: unique residue ids (0-indexed)
            - featurizer: AlphaAngle
            - featuregroup: whether sin or cos has been applied.
"""
feature_descs = []
# fill in the atom indices using just the first frame
self.partial_transform(traj[0])
top = traj.topology
if self.atom_indices is None:
raise ValueError("Cannot describe features for "
"trajectories with "
"fewer than 4 alpha carbon"
"using AlphaAngleFeaturizer.")
aind_tuples = self.atom_indices
zippy = zippy_maker(aind_tuples, top)
if self.sincos:
zippy = itertools.product(["AlphaAngle"], ["N/A"], ['cos', 'sin'], zippy)
else:
zippy = itertools.product(["AlphaAngle"], ["N/A"], ['nosincos'], zippy)
feature_descs.extend(dict_maker(zippy))
return feature_descs
class KappaAngleFeaturizer(Featurizer):
"""Featurizer to extract kappa angles.
The kappa angle of residue `i` is the angle formed by the three CA atoms
of residues `i-offset`, `i` and `i+offset`. This featurizer extracts the
`n_residues - 2*offset` kappa angles of each frame in a trajectory.
Parameters
----------
cos : bool
Compute the cosine of the angle instead of the angle itself.
offset : int
Offset to use for when calculating the features. Defaults to 2.
I.e it will calculate the angles between i-2, i and i+2 CA
"""
def __init__(self, cos=True, offset=2):
self.cos = cos
self.atom_indices = None
self.offset = offset
def partial_transform(self, traj):
ca = [a.index for a in traj.top.atoms if a.name == 'CA']
if len(ca) < 2*self.offset + 1:
return np.zeros((len(traj), 0), dtype=np.float32)
angle_indices = np.array(
[(ca[i - self.offset], ca[i],
ca[i + self.offset]) for i in range(self.offset, len(ca) - self.offset)])
result = md.compute_angles(traj, angle_indices)
if self.atom_indices is None:
self.atom_indices = np.vstack(angle_indices)
if self.cos:
return np.cos(result)
return result
def describe_features(self, traj):
"""Return a list of dictionaries describing the dihderal features.
Parameters
----------
traj : mdtraj.Trajectory
The trajectory to describe
Returns
-------
feature_descs : list of dict
Dictionary describing each feature with the following information
about the atoms participating in each dihedral
- resnames: unique names of residues
            - atominds: the three atom indices
            - resseqs: unique residue sequence ids (not necessarily
            0-indexed)
            - resids: unique residue ids (0-indexed)
            - featurizer: KappaAngle
            - featuregroup: whether cos has been applied.
"""
feature_descs = []
# fill in the atom indices using just the first frame
self.partial_transform(traj[0])
top = traj.topology
if self.atom_indices is None:
raise ValueError("Cannot describe features for trajectories "
"with fewer than 5 alpha carbon"
"using KappaAngle Featurizer")
aind_tuples = self.atom_indices
zippy = zippy_maker(aind_tuples, top)
if self.cos:
zippy = itertools.product(["Kappa"],["N/A"], ['cos'], zippy)
else:
zippy = itertools.product(["Kappa"],["N/A"], ['nocos'], zippy)
feature_descs.extend(dict_maker(zippy))
return feature_descs
class AngleFeaturizer(Featurizer):
"""Featurizer based on angles between 3 atoms.
This featurizer transforms a dataset containing MD trajectories into
a vector dataset by representing each frame in each of the MD trajectories
by a vector of the angles between triples of amino-acid residues.
Parameters
----------
angle_indices : list of tuples
List of triplet atoms to compute the angles for. Please ensure that
they are properly sorted
cos : bool
Compute the cosine of the angle instead of the angle itself.
"""
def __init__(self, angle_indices=None, cos=True):
if angle_indices is None:
raise ValueError("Need to specify atom triplets to use")
self.angle_indices = np.vstack(angle_indices)
self.cos = cos
def partial_transform(self, traj):
result = md.compute_angles(traj, self.angle_indices)
if self.cos:
return np.cos(result)
return result
def describe_features(self, traj):
"""Return a list of dictionaries describing the dihderal features.
Parameters
----------
traj : mdtraj.Trajectory
The trajectory to describe
Returns
-------
feature_descs : list of dict
Dictionary describing each feature with the following information
about the atoms participating in each dihedral
- resnames: unique names of residues
                - atominds: the three atom indices
- resseqs: unique residue sequence ids (not necessarily
0-indexed)
- resids: unique residue ids (0-indexed)
                - featurizer: Angle
                - featuregroup: the type of angle and whether cos has
                  been applied.
"""
feature_descs = []
# fill in the atom indices using just the first frame
self.partial_transform(traj[0])
top = traj.topology
if self.angle_indices is None:
raise ValueError("Cannot describe features for trajectories")
aind_tuples = self.angle_indices
zippy = zippy_maker(aind_tuples, top)
if self.cos:
zippy = itertools.product(["Angle"],["N/A"], ['cos'], zippy)
else:
zippy = itertools.product(["Angle"],["N/A"], ['nocos'], zippy)
feature_descs.extend(dict_maker(zippy))
return feature_descs
class SASAFeaturizer(Featurizer):
"""Featurizer based on solvent-accessible surface areas.
Parameters
----------
mode : {'atom', 'residue'}, default='residue'
In mode == 'atom', the extracted features are the per-atom
SASA. In mode == 'residue', this is consolidated down to
the per-residue SASA by summing over the atoms in each
residue.
Other Parameters
----------------
probe_radius : float
n_sphere_points : int
If supplied, these arguments will be passed directly to
`mdtraj.shrake_rupley`, overriding default values.
See Also
--------
mdtraj.shrake_rupley
"""
def __init__(self, mode='residue', **kwargs):
self.mode = mode
self.kwargs = kwargs
def partial_transform(self, traj):
return md.shrake_rupley(traj, mode=self.mode, **self.kwargs)
def describe_features(self, traj):
"""Return a list of dictionaries describing the SASA features.
Parameters
----------
traj : mdtraj.Trajectory
The trajectory to describe
Returns
-------
feature_descs : list of dict
Dictionary describing each feature with the following information
about the atoms participating in each SASA feature
- resnames: names of residues
- atominds: atom index or atom indices in mode="residue"
- resseqs: residue ids (not necessarily 0-indexed)
- resids: unique residue ids (0-indexed)
- featurizer: SASA
- featuregroup: atom or residue
"""
feature_descs = []
_, mapping = md.geometry.sasa.shrake_rupley(traj, mode=self.mode, get_mapping=True)
top = traj.topology
if self.mode == "residue":
resids = np.unique(mapping)
resseqs = [top.residue(ri).resSeq for ri in resids]
resnames = [top.residue(ri).name for ri in resids]
atoms_in_res = [res.atoms for res in top.residues]
aind_tuples = []
            # For each residue...
            for x in atoms_in_res:
                # For each atom in the residue, append its index
                aind_tuples.append([atom.index for atom in x])
zippy = itertools.product(['SASA'],['N/A'],[self.mode], zip(aind_tuples, resseqs, resids, resnames))
else:
resids = [top.atom(ai).residue.index for ai in mapping]
resseqs = [top.atom(ai).residue.resSeq for ai in mapping]
resnames = [top.atom(ai).residue.name for ai in mapping]
zippy = itertools.product(['SASA'],['N/A'],[self.mode], zip(mapping, resseqs, resids, resnames))
feature_descs.extend(dict_maker(zippy))
return feature_descs
class ContactFeaturizer(Featurizer):
"""Featurizer based on residue-residue distances.
This featurizer transforms a dataset containing MD trajectories into
a vector dataset by representing each frame in each of the MD trajectories
by a vector of the distances between pairs of amino-acid residues.
    The exact method for computing the distance between two residues
is configurable with the ``scheme`` parameter.
Parameters
----------
contacts : np.ndarray or 'all'
array containing (0-indexed) indices of the residues to compute the
contacts for. (e.g. np.array([[0, 10], [0, 11]]) would compute
the contact between residue 0 and residue 10 as well as
the contact between residue 0 and residue 11.) [NOTE: if no
array is passed then 'all' contacts are calculated. This means
that the result will contain all contacts between residues
separated by at least 3 residues.]
scheme : {'ca', 'closest', 'closest-heavy'}
scheme to determine the distance between two residues:
'ca' : distance between two residues is given by the distance
between their alpha carbons
'closest' : distance is the closest distance between any
two atoms in the residues
'closest-heavy' : distance is the closest distance between
any two non-hydrogen atoms in the residues
ignore_nonprotein : bool
When using `contact==all`, don't compute contacts between
"residues" which are not protein (i.e. do not contain an alpha
carbon).
    soft_min : bool, default=False
        If soft_min is true, we will use a differentiable version of
        the scheme. The exact expression used
        is :math:`d = \frac{\beta}{\log\sum_i \exp(\frac{\beta}{d_i})}` where
        beta is a user parameter which defaults to 20nm. The expression
        we use is copied from the plumed mindist calculator.
        http://plumed.github.io/doc-v2.0/user-doc/html/mindist.html
soft_min_beta : float, default=20nm
The value of beta to use for the soft_min distance option.
Very large values might cause small contact distances to go to 0.
periodic : bool, default=True
If True, compute distances using periodic boundary conditions.
"""
def __init__(self, contacts='all', scheme='closest-heavy', ignore_nonprotein=True,
soft_min=False, soft_min_beta=20, periodic=True):
self.contacts = contacts
self.scheme = scheme
self.ignore_nonprotein = ignore_nonprotein
self.soft_min = soft_min
self.soft_min_beta = soft_min_beta
        if self.soft_min and 'soft_min' not in inspect.signature(md.compute_contacts).parameters:
            raise ValueError("Sorry but soft_min requires the latest "
                             "version of mdtraj")
self.periodic = periodic
def _transform(self, distances):
return distances
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space derived from
residue-residue distances
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
if self.soft_min:
distances, _ = md.compute_contacts(traj, self.contacts,
self.scheme, self.ignore_nonprotein,
soft_min=self.soft_min,
soft_min_beta=self.soft_min_beta,
periodic=self.periodic)
else:
distances, _ = md.compute_contacts(traj, self.contacts,
self.scheme, self.ignore_nonprotein,
periodic=self.periodic)
return self._transform(distances)
def describe_features(self, traj):
"""Return a list of dictionaries describing the contacts features.
Parameters
----------
traj : mdtraj.Trajectory
The trajectory to describe
Returns
-------
feature_descs : list of dict
Dictionary describing each feature with the following information
about the atoms participating in each dihedral
- resnames: unique names of residues
                - atominds: atom indices (the CA atoms if scheme is 'ca',
                  otherwise all atom indices in each residue)
- resseqs: unique residue sequence ids (not necessarily
0-indexed)
- resids: unique residue ids (0-indexed)
- featurizer: Contact
- featuregroup: ca, heavy etc.
"""
feature_descs = []
# fill in the atom indices using just the first frame
if self.soft_min:
distances, residue_indices = md.compute_contacts(traj[0], self.contacts,
self.scheme,
self.ignore_nonprotein,
soft_min=self.soft_min,
soft_min_beta=self.soft_min_beta,
periodic=self.periodic)
else:
distances, residue_indices = md.compute_contacts(traj[0], self.contacts,
self.scheme,
self.ignore_nonprotein,
periodic=self.periodic)
top = traj.topology
aind = []
resseqs = []
resnames = []
if self.scheme=='ca':
atom_ind_list = [[j.index for j in i.atoms if j.name=='CA']
for i in top.residues]
elif self.scheme=='closest-heavy':
atom_ind_list = [[j.index for j in i.atoms if j.element.name!="hydrogen"]
for i in top.residues]
elif self.scheme=='closest':
atom_ind_list = [[j.index for j in i.atoms] for i in top.residues]
else:
atom_ind_list = [["N/A"] for i in top.residues]
for resid_ids in residue_indices:
aind += [[atom_ind_list[ri] for ri in resid_ids]]
resseqs += [[top.residue(ri).resSeq for ri in resid_ids]]
resnames += [[top.residue(ri).name for ri in resid_ids]]
zippy = itertools.product(["Contact"], [self.scheme],
["{}".format(self.soft_min_beta)],
zip(aind, resseqs, residue_indices, resnames))
feature_descs.extend(dict_maker(zippy))
return feature_descs
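# --- Illustrative sketch (not part of the original msmbuilder source) ---
# The soft_min option described in the class docstring replaces the hard
# minimum over candidate atom-atom distances with
# d = beta / log(sum_i exp(beta / d_i)).  This hypothetical helper shows the
# formula on a plain numpy array of distances; it only assumes numpy
# (imported as np at the top of this module).
def _sketch_soft_min_distance(candidate_distances, beta=20.0):
    """Differentiable stand-in for min(candidate_distances).

    As beta grows the result approaches the true minimum; e.g. for
    distances [0.3, 0.5, 1.2] (nm) the soft minimum is close to 0.3.
    """
    d = np.asarray(candidate_distances, dtype=float)
    return beta / np.log(np.sum(np.exp(beta / d)))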
class BinaryContactFeaturizer(ContactFeaturizer):
"""Featurizer based on residue-residue contacts below a cutoff.
This featurizer transforms a dataset containing MD trajectories into
a vector dataset by representing each frame in each of the MD trajectories
by a vector of the binary contacts between pairs of amino-acid residues.
    The exact method for computing the distance between two residues
is configurable with the ``scheme`` parameter.
Parameters
----------
contacts : np.ndarray or 'all'
array containing (0-indexed) indices of the residues to compute the
contacts for. (e.g. np.array([[0, 10], [0, 11]]) would compute
the contact between residue 0 and residue 10 as well as
the contact between residue 0 and residue 11.) [NOTE: if no
array is passed then 'all' contacts are calculated. This means
that the result will contain all contacts between residues
separated by at least 3 residues.]
scheme : {'ca', 'closest', 'closest-heavy'}
scheme to determine the distance between two residues:
'ca' : distance between two residues is given by the distance
between their alpha carbons
'closest' : distance is the closest distance between any
two atoms in the residues
'closest-heavy' : distance is the closest distance between
any two non-hydrogen atoms in the residues
ignore_nonprotein : bool
When using `contact==all`, don't compute contacts between
"residues" which are not protein (i.e. do not contain an alpha
carbon).
cutoff : float, default=0.8
Distances shorter than CUTOFF [nm] are returned as '1' and
distances longer than CUTOFF are returned as '0'.
"""
def __init__(self, contacts='all', scheme='closest-heavy', ignore_nonprotein=True, cutoff=0.8):
super(BinaryContactFeaturizer, self).__init__(contacts=contacts, scheme=scheme,
ignore_nonprotein=ignore_nonprotein)
if cutoff < 0:
raise ValueError('cutoff must be a positive distance [nm]')
self.cutoff = cutoff
def _transform(self, distances):
return distances < self.cutoff
class LogisticContactFeaturizer(ContactFeaturizer):
"""Featurizer based on logistic-transformed residue-residue contacts.
This featurizer transforms a dataset containing MD trajectories into
a vector dataset by representing each frame in each of the MD trajectories
by a vector of the distances between pairs of amino-acid residues transformed
by the logistic function (reflected across the x axis):
result = [1 + exp(k*(distances - cutoff))]^-1
    The exact method for computing the distance between two residues
is configurable with the ``scheme`` parameter.
Parameters
----------
contacts : np.ndarray or 'all'
array containing (0-indexed) indices of the residues to compute the
contacts for. (e.g. np.array([[0, 10], [0, 11]]) would compute
the contact between residue 0 and residue 10 as well as
the contact between residue 0 and residue 11.) [NOTE: if no
array is passed then 'all' contacts are calculated. This means
that the result will contain all contacts between residues
separated by at least 3 residues.]
scheme : {'ca', 'closest', 'closest-heavy'}
scheme to determine the distance between two residues:
'ca' : distance between two residues is given by the distance
between their alpha carbons
'closest' : distance is the closest distance between any
two atoms in the residues
'closest-heavy' : distance is the closest distance between
any two non-hydrogen atoms in the residues
ignore_nonprotein : bool
When using `contact==all`, don't compute contacts between
"residues" which are not protein (i.e. do not contain an alpha
carbon).
center : float, default=0.8
Determines the midpoint of the sigmoid, x_0, [nm]. Distances
shorter than CENTER will return values greater than 0.5 and
distances larger than CENTER will return values smaller than 0.5.
steepness : float, default=20
Determines the steepness of the logistic curve, [1/nm]. Small
        and large distances will approach output values of 1 and 0,
respectively, more quickly.
"""
def __init__(self, contacts='all', scheme='closest-heavy', ignore_nonprotein=True,
center=0.8, steepness=20):
super(LogisticContactFeaturizer, self).__init__(contacts=contacts, scheme=scheme,
ignore_nonprotein=ignore_nonprotein)
if center < 0:
raise ValueError('center must be a positive distance [nm]')
if steepness < 0:
raise ValueError('steepness must be a positive inverse distance [1/nm]')
self.center = center
self.steepness = steepness
def _transform(self, distances):
result = 1.0/(1+np.exp(self.steepness*(distances-self.center)))
return result
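# --- Illustrative sketch (not part of the original msmbuilder source) ---
# A quick feel for the logistic transform above: distances well below
# `center` map to ~1, distances well above it map to ~0, and the value at
# `center` itself is exactly 0.5.  This hypothetical helper only assumes
# numpy (np).
def _sketch_logistic_contact(distances_nm, center=0.8, steepness=20):
    """Apply 1 / (1 + exp(k * (d - x0))) to an array of distances.

    For d = [0.4, 0.8, 1.2] and the defaults this gives roughly
    [0.9997, 0.5, 0.0003].
    """
    d = np.asarray(distances_nm, dtype=float)
    return 1.0 / (1.0 + np.exp(steepness * (d - center)))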
class GaussianSolventFeaturizer(Featurizer):
"""Featurizer on weighted pairwise distance between solute and solvent.
We apply a Gaussian kernel to each solute-solvent pairwise distance
and sum the kernels for each solute atom, resulting in a vector
of len(solute_indices).
The values can be physically interpreted as the degree of solvation
of each solute atom.
Parameters
----------
solute_indices : np.ndarray, shape=(n_solute,)
Indices of solute atoms
solvent_indices : np.ndarray, shape=(n_solvent,)
Indices of solvent atoms
sigma : float
Sets the length scale for the gaussian kernel
periodic : bool
Whether to consider a periodic system in distance calculations
References
----------
    .. [1] Gu, Chen, et al. BMC Bioinformatics 14, no. Suppl 2
(January 21, 2013): S8. doi:10.1186/1471-2105-14-S2-S8.
"""
def __init__(self, solute_indices, solvent_indices, sigma, periodic=False):
self.solute_indices = solute_indices
self.solvent_indices = solvent_indices
self.sigma = sigma
self.periodic = periodic
self.n_features = len(self.solute_indices)
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space via calculation
of solvent fingerprints
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
# The result vector
fingerprints = np.zeros((traj.n_frames, self.n_features))
atom_pairs = np.zeros((len(self.solvent_indices), 2))
sigma = self.sigma
for i, solute_i in enumerate(self.solute_indices):
# For each solute atom, calculate distance to all solvent
# molecules
atom_pairs[:, 0] = solute_i
atom_pairs[:, 1] = self.solvent_indices
            # Use the periodic flag supplied at construction time
            distances = md.compute_distances(traj, atom_pairs,
                                             periodic=self.periodic)
distances = np.exp(-distances / (2 * sigma * sigma))
# Sum over water atoms for all frames
fingerprints[:, i] = np.sum(distances, axis=1)
return fingerprints
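# --- Illustrative sketch (not part of the original msmbuilder source) ---
# The fingerprint above is, for each solute atom, a sum of kernel weights
# over its distances to every solvent atom.  This hypothetical helper shows
# that reduction for a single frame given a precomputed distance matrix of
# shape (n_solute, n_solvent); only numpy (np) is assumed.
def _sketch_solvent_fingerprint(solute_solvent_distances, sigma):
    """Sum exp(-d / (2 * sigma**2)) over the solvent axis (same kernel as above)."""
    d = np.asarray(solute_solvent_distances, dtype=float)
    weights = np.exp(-d / (2 * sigma * sigma))
    return weights.sum(axis=1)  # one value per solute atom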
class RawPositionsFeaturizer(Featurizer):
"""Featurize an MD trajectory into a vector space with the raw
cartesian coordinates
Parameters
----------
atom_indices : None or array-like, dtype=int, shape=(n_atoms)
If specified, only return the coordinates for the atoms
given by atom_indices. Otherwise return all atoms
ref_traj : None or md.Trajectory
If specified, superpose each trajectory to the first frame of
ref_traj before getting positions. If atom_indices is also
specified, only superpose based on those atoms. The superposition
will modify each transformed trajectory *in place*.
"""
def __init__(self, atom_indices=None, ref_traj=None):
super(RawPositionsFeaturizer, self).__init__()
self.atom_indices = atom_indices
if atom_indices is not None and ref_traj is not None:
self.ref_traj = ref_traj.atom_slice(atom_indices)
else:
self.ref_traj = ref_traj
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space with the raw
cartesian coordinates.
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
Notes
-----
If you requested superposition (gave `ref_traj` in __init__) the
input trajectory will be modified.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
# Optionally take only certain atoms
if self.atom_indices is not None:
p_traj = traj.atom_slice(self.atom_indices)
else:
p_traj = traj
# Optionally superpose to a reference trajectory.
if self.ref_traj is not None:
p_traj.superpose(self.ref_traj, parallel=False)
# Get the positions and reshape.
value = p_traj.xyz.reshape(len(p_traj), -1)
return value
class DRIDFeaturizer(Featurizer):
"""Featurizer based on distribution of reciprocal interatomic
distances (DRID)
This featurizer transforms a dataset containing MD trajectories into
a vector dataset by representing each frame in each of the MD trajectories
by a vector containing the first three moments of a collection of
reciprocal interatomic distances. For details, see [1].
References
----------
.. [1] Zhou, Caflisch; Distribution of Reciprocal of Interatomic Distances:
A Fast Structural Metric. JCTC 2012 doi:10.1021/ct3003145
Parameters
----------
atom_indices : array-like of ints, default=None
Which atom indices to use during DRID featurization. If None,
all atoms are used
"""
def __init__(self, atom_indices=None):
self.atom_indices = atom_indices
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space using the distribution
of reciprocal interatomic distance (DRID) method.
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
return md.geometry.compute_drid(traj, self.atom_indices)
class Slicer(Featurizer):
"""Extracts slices (e.g. subsets) from data along the feature dimension.
Parameters
----------
index : array_like of integer, optional
If given, extract only these features by index. This corresponds
to selecting these columns from the feature-trajectories.
first : int, optional
If given, extract the first this-many features. This is useful
when features are sorted like in PCA or tICA.
Notes
-----
You must give either index or first (but not both)
"""
def __init__(self, index=None, first=None):
        if index is None and first is None:
            raise ValueError("Please specify either index or first; "
                             "at least one is required.")
if index is not None and first is not None:
raise ValueError("Please specify either index or first, "
"not both.")
self.index = index
self.first = first
def partial_transform(self, traj):
"""Slice a single input array along to select a subset of features.
Parameters
----------
traj : np.ndarray, shape=(n_samples, n_features)
A sample to slice.
Returns
-------
sliced_traj : np.ndarray shape=(n_samples, n_feature_subset)
Slice of traj
"""
if self.index is not None:
return traj[:, self.index]
else:
return traj[:, :self.first]
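# --- Illustrative sketch (not part of the original msmbuilder source) ---
# Slicer operates on already-featurized 2D arrays, not on trajectories.
# This hypothetical snippet shows both modes on random data; only numpy (np)
# is assumed.
def _sketch_slicer_usage():
    X = np.random.randn(100, 10)                               # 100 frames, 10 features
    by_index = Slicer(index=[0, 3, 7]).partial_transform(X)    # shape (100, 3)
    by_first = Slicer(first=4).partial_transform(X)            # shape (100, 4)
    return by_index.shape, by_first.shape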
class FirstSlicer(object):
def __init__(self, *args, **kwargs):
raise NotImplementedError("Please use Slicer(first=x)")
| msmbuilder/msmbuilder | msmbuilder/featurizer/featurizer.py | Python | lgpl-2.1 | 62,069 | ["Gaussian", "MDTraj"] | 89aba3e69afb8f836c56615dae53b6b4cc721ff46fc3c59f7cf2e81789ae8e8f |
from rdkit.SimDivFilters import rdSimDivPickers as rdsimdiv
import numpy
from rdkit import RDRandom
RDRandom.seed(23)
pkr = rdsimdiv.MaxMinPicker()
n = 1000
m = 80
dataPts = []
for i in range(n):
pt = numpy.zeros(2, 'd')
pt[0] = 10. * RDRandom.random()
pt[1] = 10. * RDRandom.random()
dataPts.append(pt)
# compute the condensed (upper-triangular) distance matrix
distMat = numpy.zeros(n * (n - 1) // 2, 'd')
for i in range(n - 1):
  itab = n * i - ((i + 1) * (i + 2)) // 2
  pt1 = dataPts[i]
  for j in range(i + 1, n):
    idx = itab + j
    pt2 = dataPts[j]
    diff = pt2 - pt1
    dist = numpy.sqrt(numpy.dot(diff, diff))
    distMat[idx] = dist
# now do the picking
res = pkr.Pick(distMat, n, m)
print("Results:")
for k in res:
print(dataPts[k][0], dataPts[k][1])
| ptosco/rdkit | Code/SimDivPickers/Wrap/testMaxMin.py | Python | bsd-3-clause | 755 | ["RDKit"] | 33b69ef49c102b4319d9fa90e664cc6937495cb71de45d6fcfd87d9674d55ed6 |
# -*- coding: utf-8 -*-
"""
Generators for random graphs.
"""
# Copyright (C) 2004-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from __future__ import division
import itertools
import math
import random
import networkx as nx
from .classic import empty_graph, path_graph, complete_graph
from .degree_seq import degree_sequence_tree
from collections import defaultdict
__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult (dschult@colgate.edu)'])
__all__ = ['fast_gnp_random_graph',
'gnp_random_graph',
'dense_gnm_random_graph',
'gnm_random_graph',
'erdos_renyi_graph',
'binomial_graph',
'newman_watts_strogatz_graph',
'watts_strogatz_graph',
'connected_watts_strogatz_graph',
'random_regular_graph',
'barabasi_albert_graph',
'powerlaw_cluster_graph',
'random_lobster',
'random_shell_graph',
'random_powerlaw_tree',
'random_powerlaw_tree_sequence',
'random_kernel_graph']
#-------------------------------------------------------------------------
# Some Famous Random Graphs
#-------------------------------------------------------------------------
def fast_gnp_random_graph(n, p, seed=None, directed=False):
"""Returns a `G_{n,p}` random graph, also known as an Erdős-Rényi graph or
a binomial graph.
Parameters
----------
n : int
The number of nodes.
p : float
Probability for edge creation.
seed : int, optional
Seed for random number generator (default=None).
directed : bool, optional (default=False)
If True, this function returns a directed graph.
Notes
-----
The `G_{n,p}` graph algorithm chooses each of the `[n (n - 1)] / 2`
(undirected) or `n (n - 1)` (directed) possible edges with probability `p`.
This algorithm runs in `O(n + m)` time, where `m` is the expected number of
edges, which equals `p n (n - 1) / 2`. This should be faster than
:func:`gnp_random_graph` when `p` is small and the expected number of edges
is small (that is, the graph is sparse).
See Also
--------
gnp_random_graph
References
----------
.. [1] Vladimir Batagelj and Ulrik Brandes,
"Efficient generation of large random networks",
Phys. Rev. E, 71, 036113, 2005.
"""
G = empty_graph(n)
G.name="fast_gnp_random_graph(%s,%s)"%(n,p)
if not seed is None:
random.seed(seed)
if p <= 0 or p >= 1:
return nx.gnp_random_graph(n,p,directed=directed)
w = -1
lp = math.log(1.0 - p)
if directed:
G = nx.DiGraph(G)
# Nodes in graph are from 0,n-1 (start with v as the first node index).
v = 0
while v < n:
lr = math.log(1.0 - random.random())
w = w + 1 + int(lr/lp)
if v == w: # avoid self loops
w = w + 1
while v < n <= w:
w = w - n
v = v + 1
if v == w: # avoid self loops
w = w + 1
if v < n:
G.add_edge(v, w)
else:
# Nodes in graph are from 0,n-1 (start with v as the second node index).
v = 1
while v < n:
lr = math.log(1.0 - random.random())
w = w + 1 + int(lr/lp)
while w >= v and v < n:
w = w - v
v = v + 1
if v < n:
G.add_edge(v, w)
return G
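# --- Illustrative sketch (not part of the original NetworkX source) ---
# For sparse graphs the expected edge count is p * n * (n - 1) / 2, and
# fast_gnp_random_graph only does work proportional to n + m.  This
# hypothetical check compares the realised edge count with that expectation.
def _sketch_fast_gnp_expected_edges(n=10000, p=0.0005, seed=42):
    G = fast_gnp_random_graph(n, p, seed=seed)
    expected = p * n * (n - 1) / 2
    return G.number_of_edges(), expected  # both should be close to ~25000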
def gnp_random_graph(n, p, seed=None, directed=False):
"""Returns a `G_{n,p}` random graph, also known as an Erdős-Rényi graph or
a binomial graph.
The :math:`G_{n,p}` model chooses each of the possible edges with probability
:math:`p`.
The functions :func:`binomial_graph` and :func:`erdos_renyi_graph` are
aliases of this function.
Parameters
----------
n : int
The number of nodes.
p : float
Probability for edge creation.
seed : int, optional
Seed for random number generator (default=None).
directed : bool, optional (default=False)
If True, this function returns a directed graph.
See Also
--------
fast_gnp_random_graph
Notes
-----
This algorithm runs in `O(n^2)` time. For sparse graphs (that is, for
small values of `p`), :func:`fast_gnp_random_graph` is a faster algorithm.
References
----------
.. [1] P. Erdős and A. Rényi, On Random Graphs, Publ. Math. 6, 290 (1959).
.. [2] E. N. Gilbert, Random Graphs, Ann. Math. Stat., 30, 1141 (1959).
"""
if directed:
G=nx.DiGraph()
else:
G=nx.Graph()
G.add_nodes_from(range(n))
G.name="gnp_random_graph(%s,%s)"%(n,p)
if p<=0:
return G
if p>=1:
return complete_graph(n,create_using=G)
if not seed is None:
random.seed(seed)
if G.is_directed():
edges=itertools.permutations(range(n),2)
else:
edges=itertools.combinations(range(n),2)
for e in edges:
if random.random() < p:
G.add_edge(*e)
return G
# add some aliases to common names
binomial_graph=gnp_random_graph
erdos_renyi_graph=gnp_random_graph
def dense_gnm_random_graph(n, m, seed=None):
"""Returns a `G_{n,m}` random graph.
In the `G_{n,m}` model, a graph is chosen uniformly at random from the set
of all graphs with `n` nodes and `m` edges.
This algorithm should be faster than :func:`gnm_random_graph` for dense
graphs.
Parameters
----------
n : int
The number of nodes.
m : int
The number of edges.
seed : int, optional
Seed for random number generator (default=None).
See Also
--------
gnm_random_graph()
Notes
-----
Algorithm by Keith M. Briggs Mar 31, 2006.
Inspired by Knuth's Algorithm S (Selection sampling technique),
in section 3.4.2 of [1]_.
References
----------
.. [1] Donald E. Knuth, The Art of Computer Programming,
Volume 2/Seminumerical algorithms, Third Edition, Addison-Wesley, 1997.
"""
    mmax = n * (n - 1) // 2
if m>=mmax:
G=complete_graph(n)
else:
G=empty_graph(n)
G.name="dense_gnm_random_graph(%s,%s)"%(n,m)
if n==1 or m>=mmax:
return G
if seed is not None:
random.seed(seed)
u=0
v=1
t=0
k=0
while True:
if random.randrange(mmax-t)<m-k:
G.add_edge(u,v)
k+=1
if k==m: return G
t+=1
v+=1
if v==n: # go to next row of adjacency matrix
u+=1
v=u+1
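# --- Illustrative sketch (not part of the original NetworkX source) ---
# The loop above is Knuth's selection-sampling idea: walk once through all
# `mmax` candidate pairs and accept each with probability
# (edges still needed) / (candidates still left).  This hypothetical helper
# applies the same rule to pick m items from range(mmax) so the sampling
# step can be read in isolation.
def _sketch_selection_sampling(mmax, m, seed=None):
    if seed is not None:
        random.seed(seed)
    chosen = []
    for t in range(mmax):  # t candidates already examined
        if random.randrange(mmax - t) < m - len(chosen):
            chosen.append(t)
            if len(chosen) == m:
                break
    return chosen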
def gnm_random_graph(n, m, seed=None, directed=False):
"""Returns a `G_{n,m}` random graph.
In the `G_{n,m}` model, a graph is chosen uniformly at random from the set
of all graphs with `n` nodes and `m` edges.
This algorithm should be faster than :func:`dense_gnm_random_graph` for
sparse graphs.
Parameters
----------
n : int
The number of nodes.
m : int
The number of edges.
seed : int, optional
Seed for random number generator (default=None).
directed : bool, optional (default=False)
If True return a directed graph
See also
--------
dense_gnm_random_graph
"""
if directed:
G=nx.DiGraph()
else:
G=nx.Graph()
G.add_nodes_from(range(n))
G.name="gnm_random_graph(%s,%s)"%(n,m)
if seed is not None:
random.seed(seed)
if n==1:
return G
max_edges=n*(n-1)
if not directed:
max_edges/=2.0
if m>=max_edges:
return complete_graph(n,create_using=G)
nlist = list(G)
edge_count=0
while edge_count < m:
# generate random edge,u,v
u = random.choice(nlist)
v = random.choice(nlist)
if u==v or G.has_edge(u,v):
continue
else:
G.add_edge(u,v)
edge_count=edge_count+1
return G
def newman_watts_strogatz_graph(n, k, p, seed=None):
"""Return a Newman–Watts–Strogatz small-world graph.
Parameters
----------
n : int
The number of nodes.
k : int
Each node is joined with its `k` nearest neighbors in a ring
topology.
p : float
The probability of adding a new edge for each edge.
seed : int, optional
The seed for the random number generator (the default is None).
Notes
-----
First create a ring over `n` nodes. Then each node in the ring is
connected with its `k` nearest neighbors (or `k - 1` neighbors if `k`
is odd). Then shortcuts are created by adding new edges as follows: for
each edge `(u, v)` in the underlying "`n`-ring with `k` nearest
neighbors" with probability :math:`p` add a new edge `(u, w)` with
randomly-chosen existing node `w`. In contrast with
:func:`watts_strogatz_graph`, no edges are removed.
See Also
--------
watts_strogatz_graph()
References
----------
.. [1] M. E. J. Newman and D. J. Watts,
Renormalization group analysis of the small-world network model,
Physics Letters A, 263, 341, 1999.
http://dx.doi.org/10.1016/S0375-9601(99)00757-4
"""
if seed is not None:
random.seed(seed)
if k>=n:
raise nx.NetworkXError("k>=n, choose smaller k or larger n")
G=empty_graph(n)
G.name="newman_watts_strogatz_graph(%s,%s,%s)"%(n,k,p)
nlist = list(G.nodes())
fromv = nlist
# connect the k/2 neighbors
for j in range(1, k // 2+1):
tov = fromv[j:] + fromv[0:j] # the first j are now last
for i in range(len(fromv)):
G.add_edge(fromv[i], tov[i])
# for each edge u-v, with probability p, randomly select existing
# node w and add new edge u-w
e = list(G.edges())
for (u, v) in e:
if random.random() < p:
w = random.choice(nlist)
# no self-loops and reject if edge u-w exists
# is that the correct NWS model?
while w == u or G.has_edge(u, w):
w = random.choice(nlist)
if G.degree(u) >= n-1:
break # skip this rewiring
else:
G.add_edge(u,w)
return G
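# --- Illustrative sketch (not part of the original NetworkX source) ---
# Because Newman-Watts-Strogatz only *adds* shortcut edges, the edge count
# can only grow from the n * k / 2 edges of the initial ring lattice.  A
# quick hypothetical check:
def _sketch_nws_edge_growth(n=1000, k=4, p=0.1, seed=1):
    G = newman_watts_strogatz_graph(n, k, p, seed=seed)
    ring_edges = n * k // 2
    return G.number_of_edges() >= ring_edges  # True: shortcuts added, none removed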
def watts_strogatz_graph(n, k, p, seed=None):
"""Return a Watts–Strogatz small-world graph.
Parameters
----------
n : int
The number of nodes
k : int
Each node is joined with its `k` nearest neighbors in a ring
topology.
p : float
The probability of rewiring each edge
seed : int, optional
Seed for random number generator (default=None)
See Also
--------
newman_watts_strogatz_graph()
connected_watts_strogatz_graph()
Notes
-----
First create a ring over `n` nodes. Then each node in the ring is joined
to its `k` nearest neighbors (or `k - 1` neighbors if `k` is odd).
Then shortcuts are created by replacing some edges as follows: for each
edge `(u, v)` in the underlying "`n`-ring with `k` nearest neighbors"
with probability :math:`p` replace it with a new edge `(u, w)` with uniformly
random choice of existing node `w`.
In contrast with :func:`newman_watts_strogatz_graph`, the random rewiring
does not increase the number of edges. The rewired graph is not guaranteed
to be connected as in :func:`connected_watts_strogatz_graph`.
References
----------
.. [1] Duncan J. Watts and Steven H. Strogatz,
Collective dynamics of small-world networks,
Nature, 393, pp. 440--442, 1998.
"""
if k>=n:
raise nx.NetworkXError("k>=n, choose smaller k or larger n")
if seed is not None:
random.seed(seed)
G = nx.Graph()
G.name="watts_strogatz_graph(%s,%s,%s)"%(n,k,p)
nodes = list(range(n)) # nodes are labeled 0 to n-1
# connect each node to k/2 neighbors
for j in range(1, k // 2+1):
targets = nodes[j:] + nodes[0:j] # first j nodes are now last in list
G.add_edges_from(zip(nodes,targets))
# rewire edges from each node
# loop over all nodes in order (label) and neighbors in order (distance)
# no self loops or multiple edges allowed
for j in range(1, k // 2+1): # outer loop is neighbors
targets = nodes[j:] + nodes[0:j] # first j nodes are now last in list
# inner loop in node order
for u,v in zip(nodes,targets):
if random.random() < p:
w = random.choice(nodes)
# Enforce no self-loops or multiple edges
while w == u or G.has_edge(u, w):
w = random.choice(nodes)
if G.degree(u) >= n-1:
break # skip this rewiring
else:
G.remove_edge(u,v)
G.add_edge(u,w)
return G
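# --- Illustrative sketch (not part of the original NetworkX source) ---
# Watts-Strogatz rewires edges rather than adding them, so the edge count
# stays exactly n * k / 2, but the rewired graph may become disconnected
# (which is what connected_watts_strogatz_graph below guards against).
def _sketch_ws_invariants(n=1000, k=4, p=0.1, seed=1):
    G = watts_strogatz_graph(n, k, p, seed=seed)
    return G.number_of_edges() == n * k // 2, nx.is_connected(G)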
def connected_watts_strogatz_graph(n, k, p, tries=100, seed=None):
"""Returns a connected Watts–Strogatz small-world graph.
Attempts to generate a connected graph by repeated generation of
Watts–Strogatz small-world graphs. An exception is raised if the maximum
number of tries is exceeded.
Parameters
----------
n : int
The number of nodes
k : int
Each node is joined with its `k` nearest neighbors in a ring
topology.
p : float
The probability of rewiring each edge
tries : int
Number of attempts to generate a connected graph.
seed : int, optional
The seed for random number generator.
See Also
--------
newman_watts_strogatz_graph()
watts_strogatz_graph()
"""
for i in range(tries):
G = watts_strogatz_graph(n, k, p, seed)
if nx.is_connected(G):
return G
raise nx.NetworkXError('Maximum number of tries exceeded')
def random_regular_graph(d, n, seed=None):
"""Returns a random `d`-regular graph on `n` nodes.
The resulting graph has no self-loops or parallel edges.
Parameters
----------
d : int
The degree of each node.
n : integer
The number of nodes. The value of :math:`n * d` must be even.
seed : hashable object
The seed for random number generator.
Notes
-----
The nodes are numbered from `0` to `n - 1`.
Kim and Vu's paper [2]_ shows that this algorithm samples in an
asymptotically uniform way from the space of random graphs when
`d = O(n^{1 / 3 - \epsilon})`.
Raises
------
NetworkXError
If :math:`n * d` is odd or `d` is greater than or equal to `n`.
References
----------
.. [1] A. Steger and N. Wormald,
Generating random regular graphs quickly,
Probability and Computing 8 (1999), 377-396, 1999.
http://citeseer.ist.psu.edu/steger99generating.html
.. [2] Jeong Han Kim and Van H. Vu,
Generating random regular graphs,
Proceedings of the thirty-fifth ACM symposium on Theory of computing,
San Diego, CA, USA, pp 213--222, 2003.
http://portal.acm.org/citation.cfm?id=780542.780576
"""
if (n * d) % 2 != 0:
raise nx.NetworkXError("n * d must be even")
if not 0 <= d < n:
raise nx.NetworkXError("the 0 <= d < n inequality must be satisfied")
if d == 0:
return empty_graph(n)
if seed is not None:
random.seed(seed)
def _suitable(edges, potential_edges):
# Helper subroutine to check if there are suitable edges remaining
# If False, the generation of the graph has failed
if not potential_edges:
return True
for s1 in potential_edges:
for s2 in potential_edges:
# Two iterators on the same dictionary are guaranteed
# to visit it in the same order if there are no
# intervening modifications.
if s1 == s2:
# Only need to consider s1-s2 pair one time
break
if s1 > s2:
s1, s2 = s2, s1
if (s1, s2) not in edges:
return True
return False
def _try_creation():
# Attempt to create an edge set
edges = set()
stubs = list(range(n)) * d
while stubs:
potential_edges = defaultdict(lambda: 0)
random.shuffle(stubs)
stubiter = iter(stubs)
for s1, s2 in zip(stubiter, stubiter):
if s1 > s2:
s1, s2 = s2, s1
if s1 != s2 and ((s1, s2) not in edges):
edges.add((s1, s2))
else:
potential_edges[s1] += 1
potential_edges[s2] += 1
if not _suitable(edges, potential_edges):
return None # failed to find suitable edge set
stubs = [node for node, potential in potential_edges.items()
for _ in range(potential)]
return edges
# Even though a suitable edge set exists,
# the generation of such a set is not guaranteed.
# Try repeatedly to find one.
edges = _try_creation()
while edges is None:
edges = _try_creation()
G = nx.Graph()
G.name = "random_regular_graph(%s, %s)" % (d, n)
G.add_edges_from(edges)
return G
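# --- Illustrative sketch (not part of the original NetworkX source) ---
# The stub-matching construction above should leave every node with degree
# exactly d and produce no self-loops or parallel edges.  Hypothetical
# sanity check (remember that n * d must be even):
def _sketch_regular_degrees(d=3, n=100, seed=7):
    G = random_regular_graph(d, n, seed=seed)
    return all(G.degree(v) == d for v in G)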
def _random_subset(seq,m):
""" Return m unique elements from seq.
This differs from random.sample which can return repeated
elements if seq holds repeated elements.
"""
targets=set()
while len(targets)<m:
x=random.choice(seq)
targets.add(x)
return targets
def barabasi_albert_graph(n, m, seed=None):
"""Returns a random graph according to the Barabási–Albert preferential
attachment model.
A graph of `n` nodes is grown by attaching new nodes each with `m`
edges that are preferentially attached to existing nodes with high degree.
Parameters
----------
n : int
Number of nodes
m : int
Number of edges to attach from a new node to existing nodes
seed : int, optional
Seed for random number generator (default=None).
Returns
-------
G : Graph
Raises
------
NetworkXError
If `m` does not satisfy ``1 <= m < n``.
References
----------
.. [1] A. L. Barabási and R. Albert "Emergence of scaling in
random networks", Science 286, pp 509-512, 1999.
"""
if m < 1 or m >=n:
raise nx.NetworkXError("Barabási–Albert network must have m >= 1"
" and m < n, m = %d, n = %d" % (m, n))
if seed is not None:
random.seed(seed)
# Add m initial nodes (m0 in barabasi-speak)
G=empty_graph(m)
G.name="barabasi_albert_graph(%s,%s)"%(n,m)
# Target nodes for new edges
targets=list(range(m))
# List of existing nodes, with nodes repeated once for each adjacent edge
repeated_nodes=[]
# Start adding the other n-m nodes. The first node is m.
source=m
while source<n:
# Add edges to m nodes from the source.
G.add_edges_from(zip([source]*m,targets))
# Add one node to the list for each new edge just created.
repeated_nodes.extend(targets)
# And the new node "source" has m edges to add to the list.
repeated_nodes.extend([source]*m)
# Now choose m unique nodes from the existing nodes
        # Pick uniformly from repeated_nodes (preferential attachment)
targets = _random_subset(repeated_nodes,m)
source += 1
return G
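# --- Illustrative sketch (not part of the original NetworkX source) ---
# The `repeated_nodes` trick above implements preferential attachment:
# because each node appears in the list once per incident edge, a uniform
# draw from it selects a node with probability proportional to its degree.
# A hypothetical demonstration on a tiny hand-made degree list:
def _sketch_preferential_draw(seed=0, draws=10000):
    random.seed(seed)
    repeated = [0, 0, 0, 1, 2, 2]  # degrees: node 0 -> 3, node 1 -> 1, node 2 -> 2
    counts = defaultdict(int)
    for _ in range(draws):
        counts[random.choice(repeated)] += 1
    # counts[0] : counts[1] : counts[2] should come out roughly 3 : 1 : 2
    return dict(counts)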
def powerlaw_cluster_graph(n, m, p, seed=None):
"""Holme and Kim algorithm for growing graphs with powerlaw
degree distribution and approximate average clustering.
Parameters
----------
n : int
the number of nodes
m : int
the number of random edges to add for each new node
p : float,
Probability of adding a triangle after adding a random edge
seed : int, optional
Seed for random number generator (default=None).
Notes
-----
The average clustering has a hard time getting above a certain
cutoff that depends on `m`. This cutoff is often quite low. The
transitivity (fraction of triangles to possible triangles) seems to
decrease with network size.
It is essentially the Barabási–Albert (BA) growth model with an
extra step that each random edge is followed by a chance of
making an edge to one of its neighbors too (and thus a triangle).
This algorithm improves on BA in the sense that it enables a
higher average clustering to be attained if desired.
It seems possible to have a disconnected graph with this algorithm
since the initial `m` nodes may not be all linked to a new node
on the first iteration like the BA model.
Raises
------
NetworkXError
If `m` does not satisfy ``1 <= m <= n`` or `p` does not
satisfy ``0 <= p <= 1``.
References
----------
.. [1] P. Holme and B. J. Kim,
"Growing scale-free networks with tunable clustering",
Phys. Rev. E, 65, 026107, 2002.
"""
    if m < 1 or n < m:
        raise nx.NetworkXError(
            "Must have 1 <= m <= n, m=%d, n=%d" % (m, n))
    if p > 1 or p < 0:
        raise nx.NetworkXError(
            "p must be in [0,1], p=%f" % (p))
if seed is not None:
random.seed(seed)
G=empty_graph(m) # add m initial nodes (m0 in barabasi-speak)
G.name="Powerlaw-Cluster Graph"
repeated_nodes = list(G.nodes()) # list of existing nodes to sample from
# with nodes repeated once for each adjacent edge
source=m # next node is m
    while source<n: # Now add the other n - m nodes
possible_targets = _random_subset(repeated_nodes,m)
# do one preferential attachment for new node
target=possible_targets.pop()
G.add_edge(source,target)
repeated_nodes.append(target) # add one node to list for each new link
count=1
while count<m: # add m-1 more new links
if random.random()<p: # clustering step: add triangle
neighborhood=[nbr for nbr in G.neighbors(target) \
if not G.has_edge(source,nbr) \
and not nbr==source]
if neighborhood: # if there is a neighbor without a link
nbr=random.choice(neighborhood)
G.add_edge(source,nbr) # add triangle
repeated_nodes.append(nbr)
count=count+1
continue # go to top of while loop
# else do preferential attachment step if above fails
target=possible_targets.pop()
G.add_edge(source,target)
repeated_nodes.append(target)
count=count+1
repeated_nodes.extend([source]*m) # add source node to list m times
source += 1
return G
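# --- Illustrative sketch (not part of the original NetworkX source) ---
# The triangle step is what separates this model from plain Barabasi-Albert:
# for the same n and m the Holme-Kim graph should show a noticeably higher
# average clustering coefficient.  Hypothetical comparison:
def _sketch_clustering_comparison(n=2000, m=3, p=0.5, seed=3):
    ba = barabasi_albert_graph(n, m, seed=seed)
    hk = powerlaw_cluster_graph(n, m, p, seed=seed)
    # the second value should typically be clearly larger than the first
    return nx.average_clustering(ba), nx.average_clustering(hk)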
def random_lobster(n, p1, p2, seed=None):
"""Returns a random lobster graph.
A lobster is a tree that reduces to a caterpillar when pruning all
leaf nodes. A caterpillar is a tree that reduces to a path graph
    when pruning all leaf nodes; setting `p2` to zero produces a caterpillar.
Parameters
----------
n : int
The expected number of nodes in the backbone
p1 : float
Probability of adding an edge to the backbone
p2 : float
Probability of adding an edge one level beyond backbone
seed : int, optional
Seed for random number generator (default=None).
"""
# a necessary ingredient in any self-respecting graph library
if seed is not None:
random.seed(seed)
llen=int(2*random.random()*n + 0.5)
L=path_graph(llen)
L.name="random_lobster(%d,%s,%s)"%(n,p1,p2)
# build caterpillar: add edges to path graph with probability p1
current_node=llen-1
    for i in range(llen):
        if random.random()<p1: # add fuzzy caterpillar parts
            current_node+=1
            L.add_edge(i,current_node)
if random.random()<p2: # add crunchy lobster bits
current_node+=1
L.add_edge(current_node-1,current_node)
return L # voila, un lobster!
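# --- Illustrative sketch (not part of the original NetworkX source) ---
# By definition, pruning the leaves of a lobster gives a caterpillar, and
# pruning the leaves of a caterpillar gives a path (every remaining node has
# degree <= 2).  This hypothetical check prunes twice and inspects degrees.
def _sketch_lobster_prunes_to_path(n=30, p1=0.7, p2=0.7, seed=5):
    G = random_lobster(n, p1, p2, seed=seed)
    for _ in range(2):  # remove all current leaf nodes, twice
        leaves = [v for v in G if G.degree(v) == 1]
        G.remove_nodes_from(leaves)
    return all(G.degree(v) <= 2 for v in G)  # what remains is a path (or empty)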
def random_shell_graph(constructor, seed=None):
"""Returns a random shell graph for the constructor given.
Parameters
----------
constructor : list of three-tuples
Represents the parameters for a shell, starting at the center
shell. Each element of the list must be of the form `(n, m,
d)`, where `n` is the number of nodes in the shell, `m` is
the number of edges in the shell, and `d` is the ratio of
inter-shell (next) edges to intra-shell edges. If `d` is zero,
there will be no intra-shell edges, and if `d` is one there
will be all possible intra-shell edges.
seed : int, optional
Seed for random number generator (default=None).
Examples
--------
>>> constructor = [(10, 20, 0.8), (20, 40, 0.8)]
>>> G = nx.random_shell_graph(constructor)
"""
G=empty_graph(0)
G.name="random_shell_graph(constructor)"
if seed is not None:
random.seed(seed)
glist=[]
intra_edges=[]
nnodes=0
# create gnm graphs for each shell
for (n,m,d) in constructor:
inter_edges=int(m*d)
intra_edges.append(m-inter_edges)
g=nx.convert_node_labels_to_integers(
gnm_random_graph(n,inter_edges),
first_label=nnodes)
glist.append(g)
nnodes+=n
G=nx.operators.union(G,g)
# connect the shells randomly
for gi in range(len(glist)-1):
nlist1 = list(glist[gi])
nlist2 = list(glist[gi + 1])
total_edges=intra_edges[gi]
edge_count=0
while edge_count < total_edges:
u = random.choice(nlist1)
v = random.choice(nlist2)
if u==v or G.has_edge(u,v):
continue
else:
G.add_edge(u,v)
edge_count=edge_count+1
return G
def random_powerlaw_tree(n, gamma=3, seed=None, tries=100):
"""Returns a tree with a power law degree distribution.
Parameters
----------
n : int
The number of nodes.
gamma : float
Exponent of the power law.
seed : int, optional
Seed for random number generator (default=None).
tries : int
Number of attempts to adjust the sequence to make it a tree.
Raises
------
NetworkXError
If no valid sequence is found within the maximum number of
attempts.
Notes
-----
A trial power law degree sequence is chosen and then elements are
swapped with new elements from a powerlaw distribution until the
sequence makes a tree (by checking, for example, that the number of
edges is one smaller than the number of nodes).
"""
    # This call may raise a NetworkXError if the number of tries is exceeded.
seq = random_powerlaw_tree_sequence(n, gamma=gamma, seed=seed, tries=tries)
G = degree_sequence_tree(seq)
G.name = "random_powerlaw_tree(%s,%s)" % (n, gamma)
return G
def random_powerlaw_tree_sequence(n, gamma=3, seed=None, tries=100):
"""Returns a degree sequence for a tree with a power law distribution.
Parameters
----------
n : int,
The number of nodes.
gamma : float
Exponent of the power law.
seed : int, optional
Seed for random number generator (default=None).
tries : int
Number of attempts to adjust the sequence to make it a tree.
Raises
------
NetworkXError
If no valid sequence is found within the maximum number of
attempts.
Notes
-----
A trial power law degree sequence is chosen and then elements are
swapped with new elements from a power law distribution until
the sequence makes a tree (by checking, for example, that the number of
edges is one smaller than the number of nodes).
"""
if seed is not None:
random.seed(seed)
# get trial sequence
z = nx.utils.powerlaw_sequence(n, exponent=gamma)
# round to integer values in the range [0,n]
zseq = [min(n, max(int(round(s)), 0)) for s in z]
# another sequence to swap values from
z = nx.utils.powerlaw_sequence(tries, exponent=gamma)
# round to integer values in the range [0,n]
swap = [min(n, max(int(round(s)), 0)) for s in z]
for deg in swap:
# If this degree sequence can be the degree sequence of a tree, return
# it. It can be a tree if the number of edges is one fewer than the
# number of nodes, or in other words, `n - sum(zseq) / 2 == 1`. We
# use an equivalent condition below that avoids floating point
# operations.
if 2 * n - sum(zseq) == 2:
return zseq
index = random.randint(0, n - 1)
zseq[index] = swap.pop()
raise nx.NetworkXError('Exceeded max (%d) attempts for a valid tree'
' sequence.' % tries)
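# --- Illustrative sketch (not part of the original NetworkX source) ---
# The acceptance test in the loop above, 2 * n - sum(zseq) == 2, is just the
# tree condition "edges = nodes - 1" written in degree-sequence form, since
# sum(degrees) == 2 * edges.  A hypothetical check on a returned sequence
# (note the call may raise NetworkXError if no valid sequence is found
# within `tries`):
def _sketch_tree_sequence_condition(n=50, gamma=3, seed=11):
    seq = random_powerlaw_tree_sequence(n, gamma=gamma, seed=seed)
    return sum(seq) == 2 * (n - 1)  # holds for any valid tree degree sequence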
def random_kernel_graph(n, kernel_integral, kernel_root=None, seed=None):
"""Return an random graph based on the specified kernel.
The algorithm chooses each of the `[n(n-1)]/2` possible edges with
probability specified by a kernel `\kappa(x,y)` [1]_. The kernel
`\kappa(x,y)` must be a symmetric (in `x,y`), non-negative,
bounded function.
Parameters
----------
n : int
The number of nodes
    kernel_integral : function
Function that returns the definite integral of the kernel `\kappa(x,y)`,
`F(y,a,b) := \int_a^b \kappa(x,y)dx`
kernel_root: function (optional)
Function that returns the root `b` of the equation `F(y,a,b) = r`.
If None, the root is found using :func:`scipy.optimize.brentq`
(this requires SciPy).
seed : int, optional
Seed for random number generator (default=None)
Notes
-----
The kernel is specified through its definite integral which must be
provided as one of the arguments. If the integral and root of the
kernel integral can be found in `O(1)` time then this algorithm runs in
time `O(n+m)` where m is the expected number of edges [2]_.
The nodes are set to integers from 0 to n-1.
Examples
--------
Generate an Erdős–Rényi random graph `G(n,c/n)`, with kernel
`\kappa(x,y)=c` where `c` is the mean expected degree.
>>> def integral(u, w, z):
... return c*(z-w)
>>> def root(u, w, r):
... return r/c+w
>>> c = 1
>>> graph = random_kernel_graph(1000, integral, root)
See Also
--------
gnp_random_graph
expected_degree_graph
References
----------
.. [1] Bollobás, Béla, Janson, S. and Riordan, O.
"The phase transition in inhomogeneous random graphs",
*Random Structures Algorithms*, 31, 3--122, 2007.
.. [2] Hagberg A, Lemons N (2015),
"Fast Generation of Sparse Random Kernel Graphs".
PLoS ONE 10(9): e0135177, 2015. doi:10.1371/journal.pone.0135177
"""
if not seed is None:
random.seed(seed)
if kernel_root is None:
import scipy.optimize as optimize
def kernel_root(y, a, r):
def my_function(b):
return kernel_integral(y, a, b) - r
return optimize.brentq(my_function, a, 1)
graph = nx.Graph()
graph.add_nodes_from(range(n))
(i, j) = (1, 1)
while i < n:
r = -math.log(1 - random.random()) # (1-random.random()) in (0, 1]
if kernel_integral(i/n, j/n, 1) <= r:
i, j = i+1, i+1
else:
j = int(math.ceil(n*kernel_root(i/n, j/n, r)))
graph.add_edge(i-1, j-1)
return graph
| cmtm/networkx | networkx/generators/random_graphs.py | Python | bsd-3-clause | 32,107 | ["VisIt"] | 0d1badee8bbdebf807416c3d640536ecd381a26a778bd907b4b0196b791f68be |
''' DIRAC.ResourceStatusSystem.Command package
'''
__RCSID__ = '$Id$'
| andresailer/DIRAC | ResourceStatusSystem/Command/__init__.py | Python | gpl-3.0 | 71 | ["DIRAC"] | 788ad4f66c9fe49be9d2e5f7fc16b11cbf32cac610371c09fc006bf85203b1d1 |
#!/bin/env python
""" Exit example: people doing things, when arriving at an exit/border node
- random movement
- if person arrives at any exit node placed at the map borders,
it sleeps for a while, changes its color and moves on
- uses person.act_at_node() and and location/exit
- output to visual player, which is executed as child process
"""
import sys
sys.path.append("..")
from mosp.core import Simulation, Person
from mosp.geo import osm
from mosp.impl import movement
from mosp.locations import Location
from mosp.monitors import ChildprocessPlayerChamplainMonitor, SocketPlayerMonitor
__author__ = "B. Henne"
__contact__ = "henne@dcsec.uni-hannover.de"
__copyright__ = "(c) 2011, DCSec, Leibniz Universitaet Hannover, Germany"
__license__ = "GPLv3"
COLORS = [
(0.1,0.1,0.9,1.0), # blue
(0.9,0.1,0.1,1.0), # red
(0.1,0.9,0.1,1.0), # green
(0.5,0.0,0.5,1.0), # purple
(0.0,1.0,1.0,1.0), # aqua
(0.6,0.6,0.0,1.0), # olive
(0.5,0.5,0.5,1.0), # grey
(0.0,0.0,0.0,1.0) # black
] #: blue, red, green, purple, aqua, olive, grey, black
class WigglerExit(Location):
"""The demo exit location.
People entering this location/exit will change their color,
sleep, wake up and move on.
@author: B. Henne"""
def __init__(self, name, sim):
"""Inits the demo exit location."""
super(WigglerExit, self).__init__(name=name, sim=sim)
def interact(self, person, duration=600):
"""Wiggler interacting with this location sleeps for duration seconds and changes his color."""
person.p_color = (person.p_color + 1) % 5
person.p_color_rgba = COLORS[person.p_color]
self.visit(person, duration)
class ExitWiggler(Person):
"""Demo wiggler for acting at border nodes / at exits."""
next_target = movement.person_next_target_random
def act_at_node(self, node):
"""Wiggler acts at WigglerExit."""
worldobject = node.worldobject
if worldobject is not None:
if isinstance(worldobject, WigglerExit):
self.passivate = True
self.passivate_with_stop_actions = True
worldobject.interact(self, 120)
def main():
"""Defines the simulation, map, monitors, persons and exits at border nodes."""
s = Simulation(geo=osm.OSMModel('../data/hannover2.osm'), rel_speed=60)
#m = s.add_monitor(ChildprocessPlayerChamplainMonitor, 2)
m = s.add_monitor(SocketPlayerMonitor, 2)
s.add_persons(ExitWiggler, 20, monitor=m)
exits = [node for node in s.geo.way_nodes if "border" in node.tags]
exit = WigglerExit('theExit', s)
s.activate(exit, exit.serve(), 0)
for e in exits:
e.worldobject = exit
s.run(until=10000, real_time=True, monitor=True)
if __name__ == '__main__':
main()
| bhenne/MoSP | mosp_examples/exit_wiggler.py | Python | gpl-3.0 | 2,930 | ["VisIt"] | 88a609a4f1fa8817d9ed3cf7ed11238e0aa2aeb1f99ea696bd3b1af4d3446f7f |
# Do some preliminary analysis on the results of the DECaLS-Galaxy Zoo data.
from astropy.io import fits
from astropy.cosmology import WMAP9
from matplotlib import pyplot as plt
from matplotlib import cm
from matplotlib.colors import LogNorm
from collections import Counter
import numpy as np
import re
import pandas as pd
from gz_class import plurality
gzpath = '/Users/willettk/Astronomy/Research/GalaxyZoo'
decals_path = '{0}/decals'.format(gzpath)
plot_path = '{0}/plots'.format(decals_path)
"""
decals_analysis
=========
Make plots and start analysis on the collated Galaxy Zoo-DECaLS data.
Kyle Willett (UMN) - 10 Dec 2015
"""
def load_data():
mgs = fits.getdata('{0}/matched/gz2_main.fits'.format(decals_path),1)
s82 = fits.getdata('{0}/matched/gz2_s82_coadd1.fits'.format(decals_path),1)
decals = fits.getdata('{0}/matched/decals_dr1.fits'.format(decals_path),1)
return mgs,s82,decals
def color_mag_plots(mgs,s82,decals,savefig=False):
# Make paneled histograms of the color distribution for several magnitude bins of Galaxy Zoo data.
"""
SDSS main sample (GZ2)
Stripe 82 coadded data (GZ2)
DECaLS
"""
redshifts = (0.12,0.08,0.05)
appmag_lim = 17.0
# Work out the magnitude limit from cosmology
fig,axarr = plt.subplots(num=1,nrows=3,ncols=3,figsize=(12,10))
for z,ax in zip(redshifts,axarr.ravel()):
absmag_lim = appmag_lim - WMAP9.distmod(z).value
maglim = (mgs['PETROMAG_MR'] < absmag_lim) & (mgs['REDSHIFT'] <= z)
spiral = mgs['t01_smooth_or_features_a02_features_or_disk_weighted_fraction'] >= 0.8
elliptical = mgs['t01_smooth_or_features_a01_smooth_weighted_fraction'] >= 0.8
ax.hist(mgs[maglim & spiral]['PETROMAG_U'] - mgs[maglim & spiral]['PETROMAG_R'],range=(0,4),bins=25,color='blue',histtype='step',label='spiral')
ax.hist(mgs[maglim & elliptical]['PETROMAG_U'] - mgs[maglim & elliptical]['PETROMAG_R'],range=(0,4),bins=25,color='red',histtype='step',label='elliptical')
ax.set_xlabel(r'$(u-r)$',fontsize=16)
ax.set_title(r'$M_r<{0:.2f}, z<{1:.2f}$'.format(absmag_lim,z),fontsize=16)
ax.text(0.95,0.95,'MGS',ha='right',va='top',transform=ax.transAxes)
if ax == axarr.ravel()[0]:
ax.legend(loc='upper left',fontsize=10)
s82_lim = 17.77
for z,ax in zip(redshifts,axarr.ravel()[3:6]):
absmag_lim = s82_lim - WMAP9.distmod(z).value
maglim = (s82['PETROMAG_MR'] < absmag_lim) & (s82['REDSHIFT'] <= z)
spiral = s82['t01_smooth_or_features_a02_features_or_disk_weighted_fraction'] >= 0.8
elliptical = s82['t01_smooth_or_features_a01_smooth_weighted_fraction'] >= 0.8
ax.hist(s82[maglim & spiral]['PETROMAG_U'] - s82[maglim & spiral]['PETROMAG_R'],range=(0,4),bins=25,color='blue',histtype='step',label='spiral')
ax.hist(s82[maglim & elliptical]['PETROMAG_U'] - s82[maglim & elliptical]['PETROMAG_R'],range=(0,4),bins=25,color='red',histtype='step',label='elliptical')
ax.set_xlabel(r'$(u-r)$',fontsize=16)
ax.set_title(r'$M_r<{0:.2f}, z<{1:.2f}$'.format(absmag_lim,z),fontsize=16)
ax.text(0.95,0.95,'Stripe 82',ha='right',va='top',transform=ax.transAxes)
decals_lim = 17.77
for z,ax in zip(redshifts,axarr.ravel()[6:]):
absmag_lim = decals_lim - WMAP9.distmod(z).value
maglim = (decals['metadata.mag.abs_r'] < absmag_lim) & (decals['metadata.redshift'] <= z)
spiral = decals['t00_smooth_or_features_a1_features_frac'] >= 0.8
elliptical = decals['t00_smooth_or_features_a0_smooth_frac'] >= 0.8
ax.hist(decals[maglim & spiral]['metadata.mag.u'] - decals[maglim & spiral]['metadata.mag.r'],range=(0,4),bins=25,color='blue',histtype='step',label='spiral')
ax.hist(decals[maglim & elliptical]['metadata.mag.u'] - decals[maglim & elliptical]['metadata.mag.r'],range=(0,4),bins=25,color='red',histtype='step',label='elliptical')
ax.set_xlabel(r'$(u-r)$',fontsize=16)
ax.set_title(r'$M_r<{0:.2f}, z<{1:.2f}$'.format(absmag_lim,z),fontsize=16)
ax.text(0.95,0.95,'DECaLS',ha='right',va='top',transform=ax.transAxes)
fig.tight_layout()
if savefig:
plt.savefig('{0}/color_hist.pdf'.format(plot_path))
else:
plt.show()
return None
def color_mag_ratio(mgs,s82,decals,savefig=False):
# Plot the spiral to elliptical ratio as a function of optical color.
redshifts = (0.12,0.08,0.05)
linestyles = ('solid','dashed','dashdot')
datasets = ({'data':mgs,
'title':'MGS',
'appmag':17.0,
'sp':'t01_smooth_or_features_a02_features_or_disk_weighted_fraction',
'el':'t01_smooth_or_features_a01_smooth_weighted_fraction',
'umag':'PETROMAG_U',
'rmag':'PETROMAG_R',
'absr':'PETROMAG_MR',
'redshift':'REDSHIFT'},
{'data':s82,
'title':'Stripe 82',
'appmag':17.77,
'sp':'t01_smooth_or_features_a02_features_or_disk_weighted_fraction',
'el':'t01_smooth_or_features_a01_smooth_weighted_fraction',
'umag':'PETROMAG_U',
'rmag':'PETROMAG_R',
'absr':'PETROMAG_MR',
'redshift':'REDSHIFT'},
{'data':decals,
'title':'DECaLS',
'appmag':17.77,
'sp':'t00_smooth_or_features_a1_features_frac',
'el':'t00_smooth_or_features_a0_smooth_frac',
'umag':'metadata.mag.u',
'rmag':'metadata.mag.r',
'absr':'metadata.mag.abs_r',
'redshift':'metadata.redshift'})
# Work out the magnitude limit from cosmology
fig,axarr = plt.subplots(num=2,nrows=1,ncols=3,figsize=(12,5))
for ax,d in zip(axarr.ravel(),datasets):
for z,ls in zip(redshifts,linestyles):
absmag_lim = d['appmag'] - WMAP9.distmod(z).value
maglim = (d['data'][d['absr']] < absmag_lim) & (d['data'][d['redshift']] <= z)
spiral = d['data'][d['sp']] >= 0.8
elliptical = d['data'][d['el']] >= 0.8
n_sp,bins_sp = np.histogram(d['data'][maglim & spiral][d['umag']] - d['data'][maglim & spiral][d['rmag']],range=(0,4),bins=25)
n_el,bins_el = np.histogram(d['data'][maglim & elliptical][d['umag']] - d['data'][maglim & elliptical][d['rmag']],range=(0,4),bins=25)
plotval = np.log10(n_sp * 1./n_el)
ax.plot(bins_sp[1:],plotval,linestyle=ls,label=r'$M_r<{0:.2f}, z<{1:.2f}$'.format(absmag_lim,z))
ax.set_xlabel(r'$(u-r)$',fontsize=16)
ax.set_ylabel(r'$\log(n_{sp}/n_{el})$',fontsize=16)
ax.set_ylim(-1.5,1.5)
ax.set_title(d['title'],fontsize=16)
if ax == axarr.ravel()[0]:
ax.legend(loc='upper left',fontsize=8)
fig.tight_layout()
if savefig:
plt.savefig('{0}/feature_ratio.pdf'.format(plot_path))
else:
plt.show()
return None
def feature_comparison(savefig=False):
# Plot the difference in vote fractions for the matched galaxies
filename = '{0}/fits/decals_gz2_union.fits'.format(decals_path)
data = fits.getdata(filename,1)
# Map the columns
matched_cols = [{'title':'smooth', 'gz2':"gz2_t01_smooth_or_features_a01_smooth_fraction", "decals":"decals_t00_smooth_or_features_a0_smooth_frac"},
{'title':'features/disk', 'gz2':"gz2_t01_smooth_or_features_a02_features_or_disk_fraction", "decals":"decals_t00_smooth_or_features_a1_features_frac"},
{'title':'star', 'gz2':"gz2_t01_smooth_or_features_a03_star_or_artifact_fraction", "decals":"decals_t00_smooth_or_features_a2_artifact_frac"},
{'title':'edge-on', 'gz2':"gz2_t02_edgeon_a04_yes_fraction", "decals":"decals_t01_disk_edge_on_a0_yes_frac"},
{'title':'not edge-on', 'gz2':"gz2_t02_edgeon_a05_no_fraction", "decals":"decals_t01_disk_edge_on_a1_no_frac"},
{'title':'bar', 'gz2':"gz2_t03_bar_a06_bar_fraction", "decals":"decals_t02_bar_a0_bar_frac"},
{'title':'no bar', 'gz2':"gz2_t03_bar_a07_no_bar_fraction", "decals":"decals_t02_bar_a1_no_bar_frac"},
{'title':'spiral', 'gz2':"gz2_t04_spiral_a08_spiral_fraction", "decals":"decals_t03_spiral_a0_spiral_frac"},
{'title':'no spiral', 'gz2':"gz2_t04_spiral_a09_no_spiral_fraction", "decals":"decals_t03_spiral_a1_no_spiral_frac"},
{'title':'no bulge', 'gz2':"gz2_t05_bulge_prominence_a10_no_bulge_fraction", "decals":"decals_t04_bulge_prominence_a0_no_bulge_frac"},
{'title':'medium bulge', 'gz2':"gz2_t05_bulge_prominence_a11_just_noticeable_fraction", "decals":"decals_t04_bulge_prominence_a1_obvious_frac"},
{'title':'obvious bulge', 'gz2':"gz2_t05_bulge_prominence_a12_obvious_fraction", "decals":"decals_t04_bulge_prominence_a2_dominant_frac"},
{'title':'completely round', 'gz2':"gz2_t07_rounded_a16_completely_round_fraction", "decals":"decals_t08_rounded_a0_completely_round_frac"},
{'title':'in between', 'gz2':"gz2_t07_rounded_a17_in_between_fraction", "decals":"decals_t08_rounded_a1_in_between_frac"},
{'title':'cigar shaped', 'gz2':"gz2_t07_rounded_a18_cigar_shaped_fraction", "decals":"decals_t08_rounded_a2_cigar_shaped_frac"},
{'title':'ring', 'gz2':"gz2_t08_odd_feature_a19_ring_fraction", "decals":"decals_t10_odd_feature_x1_ring_frac"},
{'title':'lens/arc', 'gz2':"gz2_t08_odd_feature_a20_lens_or_arc_fraction", "decals":"decals_t10_odd_feature_x2_lens_frac"},
{'title':'irregular', 'gz2':"gz2_t08_odd_feature_a22_irregular_fraction", "decals":"decals_t10_odd_feature_x4_irregular_frac"},
{'title':'other', 'gz2':"gz2_t08_odd_feature_a23_other_fraction", "decals":"decals_t10_odd_feature_x5_other_frac"},
{'title':'dust lane', 'gz2':"gz2_t08_odd_feature_a38_dust_lane_fraction", "decals":"decals_t10_odd_feature_x3_dustlane_frac"},
{'title':'rounded bulge', 'gz2':"gz2_t09_bulge_shape_a25_rounded_fraction", "decals":"decals_t07_bulge_shape_a0_rounded_frac"},
{'title':'boxy bulge', 'gz2':"gz2_t09_bulge_shape_a26_boxy_fraction", "decals":"decals_t07_bulge_shape_a1_boxy_frac"},
{'title':'no bulge', 'gz2':"gz2_t09_bulge_shape_a27_no_bulge_fraction", "decals":"decals_t07_bulge_shape_a2_no_bulge_frac"},
{'title':'tight arms', 'gz2':"gz2_t10_arms_winding_a28_tight_fraction", "decals":"decals_t05_arms_winding_a0_tight_frac"},
{'title':'medium arms', 'gz2':"gz2_t10_arms_winding_a29_medium_fraction", "decals":"decals_t05_arms_winding_a1_medium_frac"},
{'title':'loose arms', 'gz2':"gz2_t10_arms_winding_a30_loose_fraction", "decals":"decals_t05_arms_winding_a2_loose_frac"},
{'title':'1 arm', 'gz2':"gz2_t11_arms_number_a31_1_fraction", "decals":"decals_t06_arms_number_a0_1_frac"},
{'title':'2 arms', 'gz2':"gz2_t11_arms_number_a32_2_fraction", "decals":"decals_t06_arms_number_a1_2_frac"},
{'title':'3 arms', 'gz2':"gz2_t11_arms_number_a33_3_fraction", "decals":"decals_t06_arms_number_a2_3_frac"},
{'title':'4 arms', 'gz2':"gz2_t11_arms_number_a34_4_fraction", "decals":"decals_t06_arms_number_a3_4_frac"},
{'title':'5+ arms', 'gz2':"gz2_t11_arms_number_a36_more_than_4_fraction", "decals":"decals_t06_arms_number_a4_more_than_4_frac"}]
    # Working, but this still needs to select only the questions that were actually answered; there is a lot of pile-up at (0,0).
columns = data.columns
decals_fraccols,gz2_fraccols = [],[]
for c in columns:
colname = c.name
if len(colname) > 6:
if colname[-4:] == 'frac' and colname[:6] == 'decals':
decals_fraccols.append(c)
if len(colname) > 17:
if colname[-8:] == 'fraction' and colname[-17:] != "weighted_fraction" and colname[:3] == 'gz2':
gz2_fraccols.append(c)
decals_votearr = data.from_columns(decals_fraccols)
gz2_votearr = data.from_columns(gz2_fraccols)
decals_tasks,gz2_tasks = [],[]
for v in decals_votearr:
e_decals,a_decals = plurality(np.array(list(v)),'decals')
decals_tasks.append(e_decals)
for v in gz2_votearr:
e_gz2,a_gz2 = plurality(np.array(list(v)),'gz2')
gz2_tasks.append(e_gz2)
fig,axarr = plt.subplots(num=1,nrows=4,ncols=8,figsize=(16,10))
nrows = axarr.shape[0]
ncols = axarr.shape[1]
def plot_features(ax,taskno,indices):
plotind = indices.flatten()
ax.hist2d(data[matched_cols[taskno]['gz2']][plotind],data[matched_cols[taskno]['decals']][plotind],bins=(20,20),range=[[0,1],[0,1]],norm=LogNorm(),cmap = cm.viridis)
ax.plot([0,1],[0,1],linestyle='--',color='red',lw=2)
ax.set_title(matched_cols[taskno]['title'],fontsize=8)
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.set_xlabel(r'$f_{GZ2}$',fontsize=10)
ax.set_ylabel(r'$f_{DECaLS}$',fontsize=10)
ax.set_aspect('equal')
# Smooth/features
answers_per_task = [3,2,2,2,3,3,5,3,3,5]
match_tasks = [[ 0, 0],
[ 1, 1],
[ 2, 2],
[ 3, 3],
[ 4, 4],
[ 6, 8],
[ 7,10],
[ 8, 7],
[ 9, 5],
[10, 6]]
n = 0
for a,m in zip(answers_per_task,match_tasks):
inds = np.array(([np.array(decals_tasks)[:,m[1]] == True])) & np.array(([np.array(gz2_tasks)[:,m[0]] == True]))
for i in range(a):
plot_features(axarr.ravel()[n],n,inds)
n += 1
'''
for i in range(nrows):
ax = axarr.ravel()[i*ncols]
ax.set_ylabel(r'$f_{GZ2}$',fontsize=10)
for i in range(ncols):
ax = axarr.ravel()[(nrows - 1)*ncols + i]
ax.set_xlabel(r'$f_{DECaLS}$',fontsize=10)
'''
for di in range((nrows*ncols)-n):
fig.delaxes(axarr.ravel()[(nrows*ncols)-(di+1)])
fig.tight_layout()
if savefig:
plt.savefig('{0}/decals_gz2_feature_comparison.pdf'.format(plot_path))
else:
plt.show()
return None
def survey_dict():
# Information about the specific group settings in the project
d = {u'candels': {'name':u'CANDELS','retire_limit':80},
u'candels_2epoch': {'name':u'CANDELS 2-epoch','retire_limit':80},
u'decals': {'name':u'DECaLS','retire_limit':40},
u'ferengi': {'name':u'FERENGI','retire_limit':40},
u'goods_full': {'name':u'GOODS full-depth','retire_limit':40},
u'illustris': {'name':u'Illustris','retire_limit':40},
u'sloan_singleband':{'name':u'SDSS single-band','retire_limit':40},
u'ukidss': {'name':u'UKIDSS','retire_limit':40},
#u'sloan': {'name':u'SDSS DR8','retire_limit':60},
u'stripe82': {'name':u'Stripe 82','retire_limit':40},
u'gz2': {'name':u'SDSS DR7','retire_limit':40}}
return d
def is_number(s):
# Is a string a representation of a number?
try:
int(s)
return True
except ValueError:
return False
def morphology_distribution(survey='decals'):
# What's the plurality distribution of morphologies?
try:
collation_file = "{0}/gz_reduction_sandbox/data/decals_unweighted_classifications_00.csv".format(gzpath)
collated = pd.read_csv(collation_file)
except IOError:
print "Collation file for {0:} does not exist. Aborting.".format(survey)
return None
columns = collated.columns
fraccols,colnames = [],[]
for c in columns:
if c[-4:] == 'frac':
fraccols.append(c)
if c[0] == 't' and is_number(c[1:3]):
colnames.append(c[:3])
collist = list(set(colnames))
collist.sort()
# Plot distribution of vote fractions for each task
ntasks = len(collist)
ncols = 4 if ntasks > 9 else int(np.sqrt(ntasks))
nrows = int(ntasks / ncols) if ntasks % ncols == 0 else int(ntasks / ncols) + 1
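    # (Note added for clarity, not in the original.) For example, ntasks = 10
    # gives ncols = 4 and, since 10 % 4 != 0, nrows = int(10 / 4) + 1 = 3.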
sd = survey_dict()[survey]
survey_name = sd['name']
def f7(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
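    # (Note added for clarity, not in the original.) f7 returns the unique
    # elements of seq while preserving first-occurrence order, e.g.
    # f7(['a', 'b', 'a']) -> ['a', 'b'].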
tasklabels = f7([re.split("[ax][0-9]",f)[0][11:-1] for f in fraccols])
labels = [re.split("[ax][0-9]",f)[-1][1:-5] for f in fraccols]
# Make pie charts of the plurality votes
votearr = np.array(collated[fraccols])
class_arr,task_arr,task_ans = [],[],[]
for v in votearr:
e,a = plurality(v,survey)
task_arr.append(e)
task_ans.append(a)
task_arr = np.array(task_arr)
task_ans = np.array(task_ans)
fig,axarr = plt.subplots(nrows=nrows,ncols=ncols,figsize=(15,12))
colors=[u'#377EB8', u'#E41A1C', u'#4DAF4A', u'#984EA3', u'#FF7F00',u'#A6761D',u'#1B9E77']
n = (task_arr.shape)[1]
for i in range(n):
ax = axarr.ravel()[i]
c = Counter(task_ans[:,i][task_arr[:,i] == True])
pv,pl = [],[]
for k in c:
pv.append(c[k])
pl.append(labels[k])
ax.pie(pv,labels=pl,colors=colors,autopct=lambda(p): '{:.0f}'.format(p * sum(pv) / 100))
title = '{0:} - t{1:02} {2:}'.format(survey_name,i,tasklabels[i]) if i == 0 else 't{0:02} {1:}'.format(i,tasklabels[i])
ax.set_title(title)
ax.set_aspect('equal')
# Remove empty axes from subplots
if axarr.size > ntasks:
for i in range(axarr.size - ntasks):
ax = axarr.ravel()[axarr.size-(i+1)]
ax.set_axis_off()
fig.set_tight_layout(True)
plt.savefig('{1}/decals/plots/pie_{0:}.eps'.format(survey,gzpath))
plt.close()
return None
def morph_table_gz2():
# Print LaTeX-formatted tables of the GZ vote counts and fractions, and plot as pie chart.
overlap = True
survey = 'decals'
# Get weights
try:
fitsfile = "{0}/dr10/dr10_gz2_main_specz.fits".format(gzpath)
hdr = fits.getheader(fitsfile,1)
colnames = []
for i in range(hdr['TFIELDS']):
colnames.append(hdr['TTYPE{0}'.format(i+1)])
if overlap:
if survey == 'gz2':
collation_file = "{0}/decals/csv/decals_gz2_main.csv".format(gzpath)
elif survey == 'stripe82':
collation_file = "{0}/decals/csv/decals_gz2_stripe82c1.csv".format(gzpath)
elif survey == 'decals':
collation_file = "{0}/decals/csv/decals_gz2_union.csv".format(gzpath)
collated = pd.read_csv(collation_file)
else:
if survey == 'gz2':
collation_file = "{0}/dr10/dr10_gz2_main_specz.csv".format(gzpath)
elif survey == 'stripe82':
collation_file = "{0}/dr10/dr10_gz2_stripe82_coadd1.csv".format(gzpath)
collated = pd.read_csv(collation_file,names=colnames)
except IOError:
print "Collation file for {0:} does not exist. Aborting.".format(survey)
return None
columns = collated.columns
fraccols,colnames = [],[]
if survey == 'decals':
for c in columns:
if len(c) > 10:
if c[-4:] == 'frac' and c[:6] == 'decals':
fraccols.append(c)
if c[7] == 't' and is_number(c[8:10]):
colnames.append(c[7:10])
else:
for c in columns:
if c[-17:] == 'weighted_fraction':
fraccols.append(c)
if c[0] == 't' and is_number(c[1:3]):
colnames.append(c[:3])
collist = list(set(colnames))
collist.sort()
# Plot distribution of vote fractions for each task
ntasks = len(collist)
ncols = 4 if ntasks > 9 else int(np.sqrt(ntasks))
nrows = int(ntasks / ncols) if ntasks % ncols == 0 else int(ntasks / ncols) + 1
sd = survey_dict()[survey]
survey_name = sd['name']
def f7(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
if survey == 'decals':
tasklabels = f7([re.split("[ax][0-9]",f)[0][11:-1] for f in fraccols])
labels = [re.split("[ax][0-9]",f)[-1][1:-5] for f in fraccols]
else:
tasklabels = f7([re.split("[ax][0-9]",f)[0][4:-1] for f in fraccols])
labels = [re.split("[ax][0-9]",f[4:-18])[-1][2:] for f in fraccols]
# Make pie charts of the plurality votes
votearr = np.array(collated[fraccols])
class_arr,task_arr,task_ans = [],[],[]
for v in votearr:
e,a = plurality(v,survey)
task_arr.append(e)
task_ans.append(a)
task_arr = np.array(task_arr)
task_ans = np.array(task_ans)
fig,axarr = plt.subplots(nrows=nrows,ncols=ncols,figsize=(15,12))
colors=[u'#377EB8', u'#E41A1C', u'#4DAF4A', u'#984EA3', u'#FF7F00',u'#A6761D',u'#1B9E77']
n = (task_arr.shape)[1]
for i in range(n):
ax = axarr.ravel()[i]
c = Counter(task_ans[:,i][task_arr[:,i] == True])
pv,pl = [],[]
task_total = sum(c.values())
for k in c:
pv.append(c[k])
pl.append(labels[k])
# Print to screen in LaTeX format
print "{0:20} & {1:6} & {3:.2f} & {2:.2f}".format(labels[k],c[k],c[k] * 1./task_total,c[k] * 1./len(collated))
print ""
ax.pie(pv,labels=pl,colors=colors,autopct=lambda(p): '{:.0f}'.format(p * sum(pv) / 100))
title = '{0:} - t{1:02} {2:}'.format(survey_name,i,tasklabels[i]) if i == 0 else 't{0:02} {1:}'.format(i,tasklabels[i])
ax.set_title(title)
ax.set_aspect('equal')
# Remove empty axes from subplots
if axarr.size > ntasks:
for i in range(axarr.size - ntasks):
ax = axarr.ravel()[axarr.size-(i+1)]
ax.set_axis_off()
fig.set_tight_layout(True)
suffix = '_overlap' if overlap else ''
plt.savefig('{1}/decals/plots/pie_{0}{2}.eps'.format(survey,gzpath,suffix))
plt.close()
return None
if __name__ == "__main__":
mgs,s82,decals = load_data()
#color_mag_plots(mgs,s82,decals,savefig=True)
#color_mag_ratio(mgs,s82,decals,savefig=True)
#feature_comparison(savefig=True)
|
willettk/decals
|
python/decals_analysis.py
|
Python
|
mit
| 23,642
|
[
"Galaxy"
] |
97b6f92100f9ac85e1649ca3013960db56e372253e52c1f5697392a131fa969c
|
import numpy as np
import os
import pyfits
import pymconvolve
from rotate import ImSec
class clumpness:
"""The clumpness parameter and the algorithm used as follows
1. The image is smoothed by a boxcar of width
0.25 * r(Petrosian parameter = 0.2)
2. The smoothness is computed with the radius 1.5 by using
S = 10 * Sum(I_0 - I_S) / Sum(I_0)
where I_0 is the galaxy pixels and I_S that of smoothed image
3. Compute the average smoothness of the background and subtract
from S.
4. The inner region of the galaxy is not considered in the
computation of S as these are often unresolved.
5. Use only the positive pixels for the computation."""
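    # (Illustrative note, not part of the original source.) A minimal sketch of
    # step 2 above, assuming `img` is the galaxy image and `img_smooth` its
    # boxcar-smoothed copy (both hypothetical NumPy arrays):
    #
    #     S = 10.0 * np.sum(img - img_smooth) / np.sum(img)
    #
    # The CLUMPNESS routine below follows the same idea but restricts the sum to
    # an annulus and normalises it by the annulus area.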
def __init__(self, z, xcntr, ycntr, pa, eg, ext_rad, sigma, sky, flag_image):
self.z = z
self.xcntr = xcntr
self.ycntr = ycntr
self.ext_rad = ext_rad
self.pa = pa
self.eg = eg
self.sky = sky
self.flag_image = flag_image
        self.sigma = int(sigma)  # the size of the boxcar
self.image_clumpness = CLUMPNESS(self.z, self.xcntr, self.ycntr, \
self.pa, self.eg, self.sky, self.ext_rad, \
self.sigma, self.flag_image)
def CLUMPNESS(z, xcntr, ycntr, pa, eg, sky, ext_rad, sigma, flag_image):
CutImDa, cut_xcntr, cut_ycntr, SizeY, SizeX, ymin, ymax, xmin, \
xmax, flag_out = ImSec(z, xcntr, ycntr, ext_rad)
#print NXPTS, NYPTS,xcntr,ycntr
co = np.cos(pa * np.pi / 180.0)
si = np.sin(pa * np.pi / 180.0)
one_minus_eg_sq = (1.0 - eg)**2.0
x = np.reshape(np.arange(SizeX * SizeY), (SizeY, SizeX)) % SizeX
x = x.astype(np.float32)
y = np.reshape(np.arange(SizeX * SizeY), (SizeY, SizeX)) / SizeX
y = y.astype(np.float32)
tx = (x - cut_xcntr) * co + (y - cut_ycntr) * si
ty = (cut_xcntr - x) * si + (y - cut_ycntr) * co
R = np.sqrt(tx**2.0 + ty**2.0 / one_minus_eg_sq)
boxcar = np.reshape(np.ones(sigma * sigma), (sigma, sigma))
    I_sigma = pymconvolve.Convolve(CutImDa, boxcar)  # note: the original called an undefined `convolve` module; the imported `pymconvolve` is used here (assumed to expose Convolve)
res = CutImDa - I_sigma #the residual image
if(flag_image):
        # The block below selects the image portion that is an annulus of inner
        # radius 0.3 * eta(.2) and outer radius 1.5 * eta(.2), by setting the
        # residual to zero inside ext_rad/20 and outside ext_rad.
res[R <= ext_rad * (1 / 20.0)] = 0
res[R >= ext_rad] = 0
os.system('rm -f clumres.fits')
hdu = pyfits.PrimaryHDU(res)
hdu.writeto('clumres.fits')
# sum of positive values of residue
res_inside_anulus_sum = res[res > 0].sum()
# Average inside ext_rad. res_inside_anulus_sum will be divided by
# z_inside_R_sum in casgm module
z_inside_R_sum = CutImDa[R <= ext_rad].sum() / (3.14 * \
ext_rad * ext_rad * np.sqrt(1 - eg**2.0))
        # FIX: it is not clear why 1/6 is used here instead of 1/20.
area = 3.14 * (ext_rad * ext_rad * np.sqrt(1 - eg**2.0)) - \
3.14 * (ext_rad * ext_rad * (1 / 6.0) * (1 / 6.0) * \
np.sqrt(1 - eg**2.0))
# END
S = res_inside_anulus_sum / area
e1sq = CutImDa[res > 0].sum() + I_sigma[res > 0].sum() + \
4. * CutImDa[res > 0].size * sky
else:
res[R >= ext_rad] = 0
os.system('rm -f clumres.fits')
hdu = pyfits.PrimaryHDU(res)
hdu.writeto('clumres.fits')
res_inside_anulus_sum = res[np.where(res > 0)].sum()
area = 3.14 * ext_rad**2.0
S = res_inside_anulus_sum / area
z_inside_R_sum = 0 # just to return the value in the end
e1sq = CutImDa[res > 0].sum() + I_sigma[res > 0].sum() + \
2. * CutImDa[res > 0].size * sky
e2sq = res_inside_anulus_sum**2.0
e3sq = CutImDa[R <= ext_rad].sum() + 2 * CutImDa[R <= ext_rad].size * sky
e4sq = (CutImDa[R <= ext_rad].sum())**2.0
print S
if(e2sq!=0):
error = e1sq / e2sq
else:
print "Could not find error"
error = 0.0
return S, error, z_inside_R_sum, e3sq, e4sq
f=pyfits.open('n5585_lR.fits')
z=f[0].data
f.close()
z = z - 1390.0
#clumpness(z, 192.03,157.42, 0, 0, 100, 20, 1390., 1) #image
clumpness(z, 50, 50, 0, 0, 15, 20, 1390.0, 0) #background
|
vvinuv/pymorph
|
test/clumfunc.py
|
Python
|
gpl-2.0
| 4,436
|
[
"Galaxy"
] |
6c09abc48d1ebfc7f15541622055d3d4700b201779b586195160ba5b72a3f814
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.videointelligence_v1p3beta1.services.video_intelligence_service import (
VideoIntelligenceServiceAsyncClient,
)
from google.cloud.videointelligence_v1p3beta1.services.video_intelligence_service import (
VideoIntelligenceServiceClient,
)
from google.cloud.videointelligence_v1p3beta1.services.video_intelligence_service import (
transports,
)
from google.cloud.videointelligence_v1p3beta1.types import video_intelligence
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert VideoIntelligenceServiceClient._get_default_mtls_endpoint(None) is None
assert (
VideoIntelligenceServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
VideoIntelligenceServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
VideoIntelligenceServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
VideoIntelligenceServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
VideoIntelligenceServiceClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize(
"client_class",
[VideoIntelligenceServiceClient, VideoIntelligenceServiceAsyncClient,],
)
def test_video_intelligence_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "videointelligence.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.VideoIntelligenceServiceGrpcTransport, "grpc"),
(transports.VideoIntelligenceServiceGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_video_intelligence_service_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class",
[VideoIntelligenceServiceClient, VideoIntelligenceServiceAsyncClient,],
)
def test_video_intelligence_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "videointelligence.googleapis.com:443"
def test_video_intelligence_service_client_get_transport_class():
transport = VideoIntelligenceServiceClient.get_transport_class()
available_transports = [
transports.VideoIntelligenceServiceGrpcTransport,
]
assert transport in available_transports
transport = VideoIntelligenceServiceClient.get_transport_class("grpc")
assert transport == transports.VideoIntelligenceServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
VideoIntelligenceServiceClient,
transports.VideoIntelligenceServiceGrpcTransport,
"grpc",
),
(
VideoIntelligenceServiceAsyncClient,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
VideoIntelligenceServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VideoIntelligenceServiceClient),
)
@mock.patch.object(
VideoIntelligenceServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VideoIntelligenceServiceAsyncClient),
)
def test_video_intelligence_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(
VideoIntelligenceServiceClient, "get_transport_class"
) as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(
VideoIntelligenceServiceClient, "get_transport_class"
) as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
VideoIntelligenceServiceClient,
transports.VideoIntelligenceServiceGrpcTransport,
"grpc",
"true",
),
(
VideoIntelligenceServiceAsyncClient,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
VideoIntelligenceServiceClient,
transports.VideoIntelligenceServiceGrpcTransport,
"grpc",
"false",
),
(
VideoIntelligenceServiceAsyncClient,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
VideoIntelligenceServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VideoIntelligenceServiceClient),
)
@mock.patch.object(
VideoIntelligenceServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VideoIntelligenceServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_video_intelligence_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class",
[VideoIntelligenceServiceClient, VideoIntelligenceServiceAsyncClient],
)
@mock.patch.object(
VideoIntelligenceServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VideoIntelligenceServiceClient),
)
@mock.patch.object(
VideoIntelligenceServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VideoIntelligenceServiceAsyncClient),
)
def test_video_intelligence_service_client_get_mtls_endpoint_and_cert_source(
client_class,
):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
VideoIntelligenceServiceClient,
transports.VideoIntelligenceServiceGrpcTransport,
"grpc",
),
(
VideoIntelligenceServiceAsyncClient,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_video_intelligence_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
VideoIntelligenceServiceClient,
transports.VideoIntelligenceServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
VideoIntelligenceServiceAsyncClient,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_video_intelligence_service_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_video_intelligence_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.videointelligence_v1p3beta1.services.video_intelligence_service.transports.VideoIntelligenceServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = VideoIntelligenceServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
VideoIntelligenceServiceClient,
transports.VideoIntelligenceServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
VideoIntelligenceServiceAsyncClient,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_video_intelligence_service_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"videointelligence.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=None,
default_host="videointelligence.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"request_type", [video_intelligence.AnnotateVideoRequest, dict,]
)
def test_annotate_video(request_type, transport: str = "grpc"):
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.annotate_video), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.annotate_video(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == video_intelligence.AnnotateVideoRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_annotate_video_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.annotate_video), "__call__") as call:
client.annotate_video()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == video_intelligence.AnnotateVideoRequest()
@pytest.mark.asyncio
async def test_annotate_video_async(
transport: str = "grpc_asyncio",
request_type=video_intelligence.AnnotateVideoRequest,
):
client = VideoIntelligenceServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.annotate_video), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.annotate_video(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == video_intelligence.AnnotateVideoRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_annotate_video_async_from_dict():
await test_annotate_video_async(request_type=dict)
def test_annotate_video_flattened():
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.annotate_video), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.annotate_video(
input_uri="input_uri_value",
features=[video_intelligence.Feature.LABEL_DETECTION],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].input_uri
mock_val = "input_uri_value"
assert arg == mock_val
arg = args[0].features
mock_val = [video_intelligence.Feature.LABEL_DETECTION]
assert arg == mock_val
def test_annotate_video_flattened_error():
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.annotate_video(
video_intelligence.AnnotateVideoRequest(),
input_uri="input_uri_value",
features=[video_intelligence.Feature.LABEL_DETECTION],
)
@pytest.mark.asyncio
async def test_annotate_video_flattened_async():
client = VideoIntelligenceServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.annotate_video), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.annotate_video(
input_uri="input_uri_value",
features=[video_intelligence.Feature.LABEL_DETECTION],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].input_uri
mock_val = "input_uri_value"
assert arg == mock_val
arg = args[0].features
mock_val = [video_intelligence.Feature.LABEL_DETECTION]
assert arg == mock_val
@pytest.mark.asyncio
async def test_annotate_video_flattened_error_async():
client = VideoIntelligenceServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.annotate_video(
video_intelligence.AnnotateVideoRequest(),
input_uri="input_uri_value",
features=[video_intelligence.Feature.LABEL_DETECTION],
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.VideoIntelligenceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.VideoIntelligenceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VideoIntelligenceServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.VideoIntelligenceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = VideoIntelligenceServiceClient(
client_options=options, transport=transport,
)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = VideoIntelligenceServiceClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.VideoIntelligenceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VideoIntelligenceServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.VideoIntelligenceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = VideoIntelligenceServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.VideoIntelligenceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.VideoIntelligenceServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.VideoIntelligenceServiceGrpcTransport,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport, transports.VideoIntelligenceServiceGrpcTransport,
)
def test_video_intelligence_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.VideoIntelligenceServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_video_intelligence_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.videointelligence_v1p3beta1.services.video_intelligence_service.transports.VideoIntelligenceServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.VideoIntelligenceServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = ("annotate_video",)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_video_intelligence_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.videointelligence_v1p3beta1.services.video_intelligence_service.transports.VideoIntelligenceServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.VideoIntelligenceServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_video_intelligence_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.videointelligence_v1p3beta1.services.video_intelligence_service.transports.VideoIntelligenceServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.VideoIntelligenceServiceTransport()
adc.assert_called_once()
def test_video_intelligence_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
VideoIntelligenceServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.VideoIntelligenceServiceGrpcTransport,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
],
)
def test_video_intelligence_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.VideoIntelligenceServiceGrpcTransport, grpc_helpers),
(transports.VideoIntelligenceServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_video_intelligence_service_transport_create_channel(
transport_class, grpc_helpers
):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"videointelligence.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="videointelligence.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.VideoIntelligenceServiceGrpcTransport,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
],
)
def test_video_intelligence_service_grpc_transport_client_cert_source_for_mtls(
transport_class,
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_video_intelligence_service_host_no_port():
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="videointelligence.googleapis.com"
),
)
assert client.transport._host == "videointelligence.googleapis.com:443"
def test_video_intelligence_service_host_with_port():
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="videointelligence.googleapis.com:8000"
),
)
assert client.transport._host == "videointelligence.googleapis.com:8000"
def test_video_intelligence_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.VideoIntelligenceServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_video_intelligence_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.VideoIntelligenceServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.VideoIntelligenceServiceGrpcTransport,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
],
)
def test_video_intelligence_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.VideoIntelligenceServiceGrpcTransport,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
],
)
def test_video_intelligence_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_video_intelligence_service_grpc_lro_client():
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_video_intelligence_service_grpc_lro_async_client():
client = VideoIntelligenceServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = VideoIntelligenceServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = VideoIntelligenceServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = VideoIntelligenceServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = VideoIntelligenceServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = VideoIntelligenceServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = VideoIntelligenceServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = VideoIntelligenceServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = VideoIntelligenceServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = VideoIntelligenceServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = VideoIntelligenceServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = VideoIntelligenceServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = VideoIntelligenceServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = VideoIntelligenceServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = VideoIntelligenceServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = VideoIntelligenceServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.VideoIntelligenceServiceTransport, "_prep_wrapped_messages"
) as prep:
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.VideoIntelligenceServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = VideoIntelligenceServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = VideoIntelligenceServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(
VideoIntelligenceServiceClient,
transports.VideoIntelligenceServiceGrpcTransport,
),
(
VideoIntelligenceServiceAsyncClient,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-videointelligence
|
tests/unit/gapic/videointelligence_v1p3beta1/test_video_intelligence_service.py
|
Python
|
apache-2.0
| 56,765
|
[
"Octopus"
] |
cc8ca174fb9fa0a0427be4b5fb9e62220ae1f2db3b2e0b4ccde629338508e2bc
|
#!/bin/python
"""
foregrounds.py
jlazear
1/20/15
Tools for constructing CMB foregrounds.
Long description
Example:
<example code here>
"""
__version__ = 20150120
__releasestatus__ = 'beta'
import inspect
import os
import numpy as np
from astropy.io import fits
import healpy as hp
import lib
# Path to the cmb/data/ directory. We'll need to read the Planck 353 GHz map.
datapath = os.path.dirname(os.path.abspath(inspect.getfile(lib))) + '/../data/'
planck353dustfname = datapath + 'COM_CompMap_dust-commrul_0256_R1.00.fits'
wmap23synchQfname = datapath + 'wmap_mcmc_k_synch_stk_q_7yr_v4p1.fits'
wmap23synchUfname = datapath + 'wmap_mcmc_k_synch_stk_u_7yr_v4p1.fits'
def generate_simple_dust_map(nu=353.e9, beta=1.6, Nside=256, clean_map=True,
n2r=False):
"""
Generates a dust thermal emission intensity map at the specified frequency
`nu`.
The generated map is a simple power law rescaling of the Planck 353 GHz
Commander-Ruler dust component map using a power law scaling factor with
index `beta` (=1.6 by default). Requires the Planck 353 GHz dust component
map to be available in the data directory,
cmb/data/COM_CompMap_dust-commrul_0256_R1.00.fits
The intensity is given by
I(p) = I_0(p)*(nu/nu0)^beta
where I_0(p) is the base thermal emission intensity map (Planck 353 GHz),
nu is the target frequency, nu0 = 353 GHz is the base map frequency,
and beta is the spectral index. By default, the Planck map (and thus this
map) is in the NESTED format.
    The map is constructed based on an Nside=256 map, but this function
will scale it to the desired Nside.
If `clean_map` is True, replaces pixels with negative values with 0.
If `n2r` is True, converts from the NESTED format to the RING format.
    Returns a map of spectral intensity I_nu in MJy/sr.
"""
with fits.open(planck353dustfname) as f:
map = f[1].data['I']
nu0 = 353.e9 # 353 GHz
factor = (nu/nu0)**beta
print "freq = {0} GHz, factor = {1}".format(nu/1.e9, factor) #DELME
map = factor*map
if Nside != 256:
map = hp.ud_grade(map, Nside, order_in='NESTED', order_out='NESTED')
if n2r:
map = hp.reorder(map, n2r=True)
if clean_map:
map = clean_simple_dust_map(map)
return map
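# Worked example (illustrative, not part of the original module): with the
# default beta = 1.6, rescaling the 353 GHz template to 150 GHz multiplies
# every pixel by (150e9/353e9)**1.6, roughly 0.25, so the dust intensity drops
# to about a quarter of its 353 GHz value.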
def clean_simple_dust_map(map):
"""Replaces non-positive pixels with 0."""
map[map <= 0] = 0.
return map
def generate_simple_dust_QU_map(nu=353.e9, polfrac=0.2, beta=1.6, Nside=256,
clean_map=True, n2r=False):
"""
Generates simple Q and U dust maps at the specified frequency.
Each pixel is treated independently. An angle theta is randomly sampled
from a uniform distribution between 0 and 2pi. Then the Q and U
components are generated from the random angle,
theta sampled from Uniform(0, 2pi)
Q = p*I(nu, beta)*cos(theta)
U = p*I(nu, beta)*sin(theta)
where p is the polarization fraction `polfrac`, `nu` is the frequency of
the maps, and `beta` is the power law index used to construct the thermal
dust emission map.
Uses generate_simple_dust_map() to generate the map. See its docstring
for information about how the simple dust maps are constructed.
    The map is constructed based on an Nside=256 map, but this function
will scale it to the desired Nside.
If `clean_map` is True, replaces pixels with negative values with 0.
If `n2r` is True, converts from the NESTED format to the RING format.
Returns a 2 x N_pix ndarray. The first row (length 2 axis) is the Q map
and the second row is the U map. The maps are in units of spectral
intensity with units MJy/sr.
"""
imap = generate_simple_dust_map(nu=nu, beta=beta, Nside=Nside,
clean_map=clean_map, n2r=n2r)
QUmaps = np.empty([2, imap.shape[0]])
theta = np.random.rand(imap.shape[0])*2*np.pi
QUmaps[0] = polfrac*imap*np.cos(theta)
QUmaps[1] = polfrac*imap*np.sin(theta)
return QUmaps
def generate_polarization_angle_map(Qmap, Umap, sigma=None):
"""
Generates a polarization angle map from the specified Q and U maps.
The Q and U maps must be in RING format if `sigma` is not None. The Q and U
maps must have the same Nside.
If `sigma` is not None, then the maps are smoothed by a Gaussian with width
`sigma` (radians) before computing the angle.
Returns a single map of the same Nside as the input Q and U maps with the
polarization angle in radians, with -pi <= gamma < pi.
The angle follows the convention of Delabrouille et al. 2012,
    gamma = (1/2)*arctan2(-U, Q)
"""
if sigma is not None:
Qmap = hp.smoothing(Qmap, sigma=sigma, verbose=False)
Umap = hp.smoothing(Umap, sigma=sigma, verbose=False)
gamma = 0.5*np.arctan2(-Umap, Qmap)
return gamma
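# Worked example (illustrative, not part of the original module): under this
# convention a pixel with pure +Q polarization (Q > 0, U = 0) has
# gamma = 0.5*arctan2(0, Q) = 0, while pure +U polarization (Q = 0, U > 0)
# gives gamma = 0.5*arctan2(-U, 0) = -pi/4.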
def generate_synchro_traced_dust_QU_map(nu=353.e9, polfrac=0.2, beta=1.6,
Nside=256, clean_map=True, n2r=False,
sigma=None):
"""
Generates a polarized dust intensity map at the specified frequency using
WMAP synchrotron data to determine the polarization angle.
The Q and U components are generated from the polarization angle according
to
Q = p*I(nu, beta)*cos(2*gamma)
U = p*I(nu, beta)*sin(2*gamma)
    where p is the polarization fraction `polfrac`, `nu` is the frequency of
    the maps, `beta` is the power law index used to construct the thermal
    dust emission map, and gamma is the polarization angle.
The polarization angle is determined from the WMAP 23 GHz synchrotron Q and
U maps according to
gamma = 0.5*arctan2(-U_23, Q_23)
following the convention of Delabrouille et al. 2012.
If `sigma` is not None, then the maps are smoothed by a Gaussian with width
`sigma` (radians) before computing the angle.
If `clean_map` is True, replaces pixels with negative values with 0.
If `n2r` is True, converts from the NESTED format to the RING format.
Returns a 2 x N_pix ndarray. The first row (length 2 axis) is the Q map
and the second row is the U map. The maps are in units of spectral
intensity with units MJy/sr.
"""
dust353map = generate_simple_dust_map(nu=nu, beta=beta, Nside=Nside,
clean_map=clean_map, n2r=n2r)
mapnames = [wmap23synchQfname, wmap23synchUfname]
wmapQ, wmapU = lib.wmap.load_wmap_maps_QU(mapnames, Nside=Nside, n2r=True)
gamma = lib.foregrounds.generate_polarization_angle_map(wmapQ, wmapU,
sigma=sigma)
if not n2r:
gamma = hp.reorder(gamma, r2n=True)
QUmaps = np.empty([2, gamma.shape[0]])
QUmaps[0] = polfrac*dust353map*np.cos(2*gamma)
QUmaps[1] = polfrac*dust353map*np.sin(2*gamma)
return QUmaps
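# Illustrative usage (a minimal sketch, not part of the original module). It
# assumes the Planck 353 GHz and WMAP 23 GHz FITS files referenced above are
# available in the data directory; the 150 GHz frequency and Nside=128 below
# are arbitrary example values.
if __name__ == '__main__':
    # Dust intensity template rescaled from 353 GHz to 150 GHz, in RING order.
    dust150 = generate_simple_dust_map(nu=150.e9, beta=1.6, Nside=128, n2r=True)
    # Polarized dust maps whose angles trace the WMAP 23 GHz synchrotron maps.
    Q150, U150 = generate_synchro_traced_dust_QU_map(nu=150.e9, polfrac=0.2,
                                                     Nside=128, n2r=True)
    print("I_150 range: {0:.3e} to {1:.3e} MJy/sr".format(dust150.min(),
                                                          dust150.max()))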
|
jlazear/cmb
|
lib/foregrounds.py
|
Python
|
apache-2.0
| 7,013
|
[
"Gaussian"
] |
7bfdb859c591d17286980d2b9bc6437938d7e18be0f3f62039e26a8e5b8fb774
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El, time
m = 2000
n = 4000
numLambdas = 7
startLambda = 0
endLambda = 1
display = True
worldRank = El.mpi.WorldRank()
# Make a sparse matrix with the last column dense
def Rectang(height,width):
A = El.DistSparseMatrix()
A.Resize(height,width)
firstLocalRow = A.FirstLocalRow()
localHeight = A.LocalHeight()
A.Reserve(5*localHeight)
for sLoc in xrange(localHeight):
s = firstLocalRow + sLoc
if s < width:
A.QueueLocalUpdate( sLoc, s, 11 )
if s >= 1 and s-1 < width:
A.QueueLocalUpdate( sLoc, s-1, -1 )
if s+1 < width:
A.QueueLocalUpdate( sLoc, s+1, 2 )
if s >= height and s-height < width:
A.QueueLocalUpdate( sLoc, s-height, -3 )
if s+height < width:
A.QueueLocalUpdate( sLoc, s+height, 4 )
# The dense last column
        A.QueueLocalUpdate( sLoc, width-1, -5/height )
A.MakeConsistent()
return A
A = Rectang(m,n)
b = El.DistMultiVec()
El.Gaussian( b, m, 1 )
if display:
El.Display( A, "A" )
El.Display( b, "b" )
ctrl = El.QPAffineCtrl_d()
ctrl.mehrotraCtrl.progress = True
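# Sweep lambda uniformly over [startLambda, endLambda]. Each solve below is a
# basis pursuit denoising (BPDN/LASSO) problem; in the usual formulation,
# assumed here, it minimizes (1/2)*||A x - b||_2^2 + lambda*||x||_1, so larger
# lambda values favor sparser solutions x.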
for j in xrange(0,numLambdas):
lambd = startLambda + j*(endLambda-startLambda)/(numLambdas-1.)
if worldRank == 0:
print "lambda =", lambd
startBPDN = time.clock()
x = El.BPDN( A, b, lambd, ctrl )
endBPDN = time.clock()
if worldRank == 0:
print "BPDN time: ", endBPDN-startBPDN
if display:
El.Display( x, "x" )
xOneNorm = El.EntrywiseNorm( x, 1 )
e = El.DistMultiVec()
El.Copy( b, e )
El.SparseMultiply( El.NORMAL, -1., A, x, 1., e )
if display:
El.Display( e, "e" )
eTwoNorm = El.Nrm2( e )
if worldRank == 0:
print "|| x ||_1 =", xOneNorm
print "|| A x - b ||_2 =", eTwoNorm
# Require the user to press a button before the figures are closed
commSize = El.mpi.Size( El.mpi.COMM_WORLD() )
El.Finalize()
if commSize == 1:
raw_input('Press Enter to exit')
|
sg0/Elemental
|
examples/interface/BPDN.py
|
Python
|
bsd-3-clause
| 2,166
|
[
"Gaussian"
] |
edce604eeeebb9b3facc1733207c36650b531928de3b07fd2fce6f72aee72603
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.MDAnalysis.org
# Copyright (c) 2006-2015 Naveen Michaud-Agrawal, Elizabeth J. Denning, Oliver
# Beckstein and contributors (see AUTHORS for the full list)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import pickle
from collections import namedtuple
import os
import sys
import string
import struct
import platform
import hypothesis.strategies as strategies
from hypothesis import example, given
import hypothesis
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_array_equal, assert_almost_equal)
from MDAnalysis.lib.formats.libdcd import DCDFile, DCD_IS_CHARMM, DCD_HAS_EXTRA_BLOCK
from MDAnalysisTests.datafiles import (
DCD, DCD_NAMD_TRICLINIC, legacy_DCD_ADK_coords, legacy_DCD_NAMD_coords,
legacy_DCD_c36_coords, DCD_TRICLINIC)
import pytest
@pytest.mark.parametrize("dcdfile, is_periodic",
[(DCD, False), (DCD_NAMD_TRICLINIC, True),
(DCD_TRICLINIC, True)])
def test_is_periodic(dcdfile, is_periodic):
with DCDFile(dcdfile) as f:
assert f.is_periodic == is_periodic
@pytest.mark.parametrize("dcdfile, natoms", [(DCD, 3341), (DCD_NAMD_TRICLINIC,
5545),
(DCD_TRICLINIC, 375)])
def test_read_coordsshape(dcdfile, natoms):
# confirm shape of coordinate data against result from previous
# MDAnalysis implementation of DCD file handling
with DCDFile(dcdfile) as dcd:
dcd_frame = dcd.read()
xyz = dcd_frame[0]
assert xyz.shape == (natoms, 3)
@pytest.mark.parametrize(
"dcdfile, unit_cell",
[(DCD, [0., 90., 0., 90., 90., 0.]),
(DCD_NAMD_TRICLINIC, [38.42659378, 0.499563, 38.393102, 0., 0., 44.7598]),
(DCD_TRICLINIC,
[30.841836, 14.578635, 31.780088, 9.626323, -2.60815, 32.67009])])
def test_read_unit_cell(dcdfile, unit_cell):
# confirm unit cell read against result from previous
# MDAnalysis implementation of DCD file handling
with DCDFile(dcdfile) as dcd:
dcd_frame = dcd.read()
assert_array_almost_equal(dcd_frame.unitcell, unit_cell)
def test_seek_over_max():
with DCDFile(DCD) as dcd:
with pytest.raises(EOFError):
dcd.seek(102)
@pytest.fixture
def dcd():
with DCDFile(DCD) as dcd:
yield dcd
def _assert_compare_readers(old_reader, new_reader):
# same as next(old_reader)
frame = old_reader.read()
# same as next(new_reader)
new_frame = new_reader.read()
assert old_reader.fname == new_reader.fname
assert old_reader.tell() == new_reader.tell()
assert_almost_equal(frame.xyz, new_frame.xyz)
assert_almost_equal(frame.unitcell, new_frame.unitcell)
def test_pickle(dcd):
mid = len(dcd) // 2
dcd.seek(mid)
new_dcd = pickle.loads(pickle.dumps(dcd))
_assert_compare_readers(dcd, new_dcd)
def test_pickle_last(dcd):
# This is the file state when DCDReader is in its last frame.
# (Issue #2878)
dcd.seek(len(dcd) - 1)
_ = dcd.read()
new_dcd = pickle.loads(pickle.dumps(dcd))
assert dcd.fname == new_dcd.fname
assert dcd.tell() == new_dcd.tell()
with pytest.raises(StopIteration):
new_dcd.read()
def test_pickle_closed(dcd):
dcd.seek(len(dcd) - 1)
dcd.close()
new_dcd = pickle.loads(pickle.dumps(dcd))
assert dcd.fname == new_dcd.fname
assert dcd.tell() != new_dcd.tell()
def test_pickle_after_read(dcd):
_ = dcd.read()
new_dcd = pickle.loads(pickle.dumps(dcd))
_assert_compare_readers(dcd, new_dcd)
def test_pickle_immediately(dcd):
new_dcd = pickle.loads(pickle.dumps(dcd))
assert dcd.fname == new_dcd.fname
assert dcd.tell() == new_dcd.tell()
@pytest.mark.parametrize("new_frame", (10, 42, 21))
def test_seek_normal(new_frame, dcd):
# frame seek within range is tested
dcd.seek(new_frame)
assert dcd.tell() == new_frame
def test_seek_negative(dcd):
with pytest.raises(IOError):
dcd.seek(-78)
def test_iteration(dcd):
num_iters = 10
for _ in range(num_iters):
dcd.__next__()
assert dcd.tell() == num_iters
def test_open_wrong_mode():
with pytest.raises(IOError):
DCDFile(DCD, 'e')
def test_raise_not_existing():
with pytest.raises(IOError):
DCDFile('foo')
def test_zero_based_frames_counting(dcd):
assert dcd.tell() == 0
@pytest.mark.parametrize("dcdfile, natoms", [(DCD, 3341), (DCD_NAMD_TRICLINIC,
5545),
(DCD_TRICLINIC, 375)])
def test_natoms(dcdfile, natoms):
with DCDFile(dcdfile) as dcd:
assert dcd.header['natoms'] == natoms
def test_read_closed(dcd):
dcd.close()
with pytest.raises(IOError):
dcd.read()
@pytest.mark.parametrize("dcdfile, nframes", [(DCD, 98), (DCD_NAMD_TRICLINIC,
1), (DCD_TRICLINIC,
10)])
def test_length_traj(dcdfile, nframes):
with DCDFile(dcdfile) as dcd:
assert len(dcd) == nframes
def test_read_write_mode_file(tmpdir):
fname = str(tmpdir.join('foo'))
with DCDFile(fname, 'w') as f:
with pytest.raises(IOError):
f.read()
def test_iterating_twice(dcd):
with dcd as f:
for i, _ in enumerate(f):
assert_equal(i + 1, f.tell())
# second iteration should work from start again
for i, _ in enumerate(f):
assert_equal(i + 1, f.tell())
DCD_HEADER = '''* DIMS ADK SEQUENCE FOR PORE PROGRAM * WRITTEN BY LIZ DENNING (6.2008) * DATE: 6/ 6/ 8 17:23:56 CREATED BY USER: denniej0 '''
DCD_NAMD_TRICLINIC_HEADER = 'Created by DCD pluginREMARKS Created 06 July, 2014 at 17:29Y5~CORD,'
DCD_TRICLINIC_HEADER = '* CHARMM TRICLINIC BOX TESTING * (OLIVER BECKSTEIN 2014) * BASED ON NPTDYN.INP : SCOTT FELLER, NIH, 7/15/95 * TEST EXTENDED SYSTEM CONSTANT PRESSURE AND TEMPERATURE * DYNAMICS WITH WATER BOX. * DATE: 7/ 7/14 13:59:46 CREATED BY USER: oliver '
@pytest.mark.parametrize("dcdfile, remarks",
((DCD, DCD_HEADER), (DCD_NAMD_TRICLINIC,
DCD_NAMD_TRICLINIC_HEADER),
(DCD_TRICLINIC, DCD_TRICLINIC_HEADER)))
def test_header_remarks(dcdfile, remarks):
# confirm correct header remarks section reading
with DCDFile(dcdfile) as f:
assert len(f.header['remarks']) == len(remarks)
@pytest.mark.parametrize("dcdfile, legacy_data, frames",
((DCD, legacy_DCD_ADK_coords, [5, 29]),
(DCD_NAMD_TRICLINIC, legacy_DCD_NAMD_coords, [0]),
(DCD_TRICLINIC, legacy_DCD_c36_coords, [1, 4])))
def test_read_coord_values(dcdfile, legacy_data, frames):
# test the actual values of coordinates read in versus
# stored values read in by the legacy DCD handling framework
# to reduce repo storage burden, we only compare for a few
# randomly selected frames
legacy = np.load(legacy_data)
with DCDFile(dcdfile) as dcd:
for index, frame_num in enumerate(frames):
dcd.seek(frame_num)
actual_coords = dcd.read()[0]
desired_coords = legacy[index]
assert_array_equal(actual_coords, desired_coords)
@pytest.mark.parametrize("dcdfile, legacy_data, frame_idx",
((DCD, legacy_DCD_ADK_coords, [5, 29]),
(DCD_NAMD_TRICLINIC, legacy_DCD_NAMD_coords, [0]),
(DCD_TRICLINIC, legacy_DCD_c36_coords, [1, 4])))
def test_readframes(dcdfile, legacy_data, frame_idx):
legacy = np.load(legacy_data)
with DCDFile(dcdfile) as dcd:
frames = dcd.readframes()
xyz = frames.xyz
assert_equal(len(xyz), len(dcd))
for index, frame_num in enumerate(frame_idx):
assert_array_almost_equal(xyz[frame_num], legacy[index])
def test_write_header(tmpdir):
# test that _write_header() can produce a very crude
# header for a new / empty file
testfile = str(tmpdir.join('test.dcd'))
with DCDFile(testfile, 'w') as dcd:
dcd.write_header(
remarks='Crazy!',
natoms=22,
istart=12,
nsavc=10,
delta=0.02,
is_periodic=1)
with DCDFile(testfile) as dcd:
header = dcd.header
assert header['remarks'] == 'Crazy!'
assert header['natoms'] == 22
assert header['istart'] == 12
assert header['is_periodic'] == 1
assert header['nsavc'] == 10
assert np.allclose(header['delta'], .02)
# we also check the bytes written directly.
with open(testfile, 'rb') as fh:
header_bytes = fh.read()
# check for magic number
assert struct.unpack('i', header_bytes[:4])[0] == 84
# magic number should be written again before remark section
assert struct.unpack('i', header_bytes[88:92])[0] == 84
# length of remark section. We hard code this to 244 right now
assert struct.unpack('i', header_bytes[92:96])[0] == 244
    # say we have 3 blocks of length 80
assert struct.unpack('i', header_bytes[96:100])[0] == 3
# after the remark section the length should be reported again
assert struct.unpack('i', header_bytes[340:344])[0] == 244
# this is a magic number as far as I see
assert struct.unpack('i', header_bytes[344:348])[0] == 4
def test_write_no_header(tmpdir):
fname = str(tmpdir.join('test.dcd'))
with DCDFile(fname, 'w') as dcd:
with pytest.raises(IOError):
dcd.write(np.ones(3), np.ones(6))
def test_write_header_twice(tmpdir):
# an IOError should be raised if a duplicate
# header writing is attempted
header = {
"remarks": 'Crazy!',
"natoms": 22,
"istart": 12,
"nsavc": 10,
"delta": 0.02,
"is_periodic": 1
}
fname = str(tmpdir.join('test.dcd'))
with DCDFile(fname, 'w') as dcd:
dcd.write_header(**header)
with pytest.raises(IOError):
dcd.write_header(**header)
def test_write_header_wrong_mode(dcd):
# an exception should be raised on any attempt to use
# write_header with a DCDFile object in 'r' mode
with pytest.raises(IOError):
dcd.write_header(
remarks='Crazy!',
natoms=22,
istart=12,
nsavc=10,
delta=0.02,
is_periodic=1)
def test_write_mode(dcd):
# ensure that writing of DCD files only occurs with properly
# opened files
with pytest.raises(IOError):
dcd.write(xyz=np.zeros((3, 3)), box=np.zeros(6, dtype=np.float64))
def write_dcd(in_name, out_name, remarks='testing', header=None):
with DCDFile(in_name) as f_in, DCDFile(out_name, 'w') as f_out:
if header is None:
header = f_in.header
f_out.write_header(**header)
for frame in f_in:
f_out.write(xyz=frame.xyz, box=frame.unitcell)
@pytest.mark.xfail((os.name == 'nt'
and sys.maxsize <= 2**32) or
platform.machine() == 'aarch64',
reason="occasional fail on 32-bit windows and ARM")
# occasionally fails due to unreliable test timings
@hypothesis.settings(deadline=None) # see Issue 3096
@given(remarks=strategies.text(
alphabet=string.printable, min_size=0,
max_size=239)) # handle the printable ASCII strings
@example(remarks='')
def test_written_remarks_property(remarks, tmpdir_factory):
# property based testing for writing of a wide range of string
# values to REMARKS field
dcd = DCDFile(DCD)
dirname = str(id(remarks)) + "_"
testfile = str(tmpdir_factory.mktemp(dirname).join('test.dcd'))
header = dcd.header
header['remarks'] = remarks
write_dcd(DCD, testfile, header=header)
expected_remarks = remarks
with DCDFile(testfile) as f:
assert f.header['remarks'] == expected_remarks
@pytest.fixture(scope='session')
def written_dcd(tmpdir_factory):
with DCDFile(DCD) as dcd:
header = dcd.header
testfile = tmpdir_factory.mktemp('dcd').join('test.dcd')
testfile = str(testfile)
write_dcd(DCD, testfile)
Result = namedtuple("Result", "testfile, header, orgfile")
# throw away last char we didn't save due to null termination
header['remarks'] = header['remarks'][:-1]
return Result(testfile, header, DCD)
def test_written_header(written_dcd):
header = written_dcd.header
with DCDFile(written_dcd.testfile) as dcd:
dcdheader = dcd.header
assert dcdheader == header
def test_written_num_frames(written_dcd):
with DCDFile(written_dcd.testfile) as dcd, DCDFile(
written_dcd.orgfile) as other:
assert len(dcd) == len(other)
def test_written_dcd_coordinate_data_shape(written_dcd):
with DCDFile(written_dcd.testfile) as dcd, DCDFile(
written_dcd.orgfile) as other:
for frame, other_frame in zip(dcd, other):
assert frame.xyz.shape == other_frame.xyz.shape
def test_written_seek(written_dcd):
# ensure that we can seek properly on written DCD file
with DCDFile(written_dcd.testfile) as f:
f.seek(40)
assert_equal(f.tell(), 40)
def test_written_coord_match(written_dcd):
with DCDFile(written_dcd.testfile) as test, DCDFile(
written_dcd.orgfile) as ref:
for frame, o_frame in zip(test, ref):
assert_array_almost_equal(frame.xyz, o_frame.xyz)
def test_written_unit_cell(written_dcd):
with DCDFile(written_dcd.testfile) as test, DCDFile(
written_dcd.orgfile) as ref:
for frame, o_frame in zip(test, ref):
assert_array_almost_equal(frame.unitcell, o_frame.unitcell)
@pytest.mark.parametrize("dtype", (np.int32, np.int64, np.float32, np.float64,
int, float))
def test_write_all_dtypes(tmpdir, dtype):
fname = str(tmpdir.join('foo.dcd'))
with DCDFile(fname, 'w') as out:
natoms = 10
xyz = np.ones((natoms, 3), dtype=dtype)
box = np.ones(6, dtype=dtype)
out.write_header(
remarks='test',
natoms=natoms,
is_periodic=1,
delta=1,
nsavc=1,
istart=1)
out.write(xyz=xyz, box=box)
@pytest.mark.parametrize("array_like", (np.array, list))
def test_write_array_like(tmpdir, array_like):
fname = str(tmpdir.join('foo.dcd'))
with DCDFile(fname, 'w') as out:
natoms = 10
xyz = array_like([[1, 1, 1] for i in range(natoms)])
box = array_like([i for i in range(6)])
out.write_header(
remarks='test',
natoms=natoms,
is_periodic=1,
delta=1,
nsavc=1,
istart=1)
out.write(xyz=xyz, box=box)
def test_write_wrong_shape_xyz(tmpdir):
fname = str(tmpdir.join('foo.dcd'))
with DCDFile(fname, 'w') as out:
natoms = 10
xyz = np.ones((natoms + 1, 3))
box = np.ones(6)
out.write_header(
remarks='test',
natoms=natoms,
is_periodic=1,
delta=1,
nsavc=1,
istart=1)
with pytest.raises(ValueError):
out.write(xyz=xyz, box=box)
def test_write_wrong_shape_box(tmpdir):
fname = str(tmpdir.join('foo.dcd'))
with DCDFile(fname, 'w') as out:
natoms = 10
xyz = np.ones((natoms, 3))
box = np.ones(7)
out.write_header(
remarks='test',
natoms=natoms,
is_periodic=1,
delta=1,
nsavc=1,
istart=1)
with pytest.raises(ValueError):
out.write(xyz=xyz, box=box)
@pytest.mark.parametrize("dcdfile", (DCD, DCD_TRICLINIC, DCD_NAMD_TRICLINIC))
def test_relative_frame_sizes(dcdfile):
# the first frame of a DCD file should always be >= in size
# to subsequent frames, as the first frame contains the same
# atoms + (optional) fixed atoms
with DCDFile(dcdfile) as dcd:
first_frame_size = dcd._firstframesize
general_frame_size = dcd._framesize
assert first_frame_size >= general_frame_size
@pytest.mark.parametrize("dcdfile", (DCD, DCD_TRICLINIC, DCD_NAMD_TRICLINIC))
def test_file_size_breakdown(dcdfile):
# the size of a DCD file is equivalent to the sum of the header
# size, first frame size, and (N - 1 frames) * size per general
# frame
expected = os.path.getsize(dcdfile)
with DCDFile(dcdfile) as dcd:
actual = dcd._header_size + dcd._firstframesize + (
(dcd.n_frames - 1) * dcd._framesize)
assert actual == expected
@pytest.mark.parametrize("dcdfile", (DCD, DCD_TRICLINIC, DCD_NAMD_TRICLINIC))
def test_nframessize_int(dcdfile):
# require that the (nframessize / framesize) value used by DCDFile
# is an integer (because nframessize / framesize + 1 = total frames,
# which must also be an int)
filesize = os.path.getsize(dcdfile)
with DCDFile(dcdfile) as dcd:
nframessize = filesize - dcd._header_size - dcd._firstframesize
assert float(nframessize) % float(dcd._framesize) == 0
@pytest.mark.parametrize(
"slice, length",
[([None, None, None], 98), ([0, None, None], 98), ([None, 98, None], 98),
([None, None, 1], 98), ([None, None, -1], 98), ([2, 6, 2], 2),
([0, 10, None], 10), ([2, 10, None], 8), ([0, 1, 1], 1), ([1, 1, 1], 0),
([1, 2, 1], 1), ([1, 2, 2], 1), ([1, 4, 2], 2), ([1, 4, 4], 1), ([
0, 5, 5
], 1), ([3, 5, 1], 2), ([4, 0, -1], 4), ([5, 0, -2], 3), ([5, 0, -4], 2)])
def test_readframes_slices(slice, length, dcd):
start, stop, step = slice
allframes = dcd.readframes().xyz
frames = dcd.readframes(start=start, stop=stop, step=step)
xyz = frames.xyz
assert len(xyz) == length
assert_array_almost_equal(xyz, allframes[start:stop:step])
@pytest.mark.parametrize("order, shape", (
('fac', (98, 3341, 3)),
('fca', (98, 3, 3341)),
('afc', (3341, 98, 3)),
('acf', (3341, 3, 98)),
('caf', (3, 3341, 98)),
('cfa', (3, 98, 3341)), ))
def test_readframes_order(order, shape, dcd):
x = dcd.readframes(order=order).xyz
assert x.shape == shape
@pytest.mark.parametrize("indices", [[1, 2, 3, 4], [5, 10, 15, 19],
[9, 4, 2, 0, 50]])
def test_readframes_atomindices(indices, dcd):
allframes = dcd.readframes(order='afc').xyz
frames = dcd.readframes(indices=indices, order='afc')
xyz = frames.xyz
assert len(xyz) == len(indices)
assert_array_almost_equal(xyz, allframes[indices])
def test_write_random_unitcell(tmpdir):
testname = str(tmpdir.join('test.dcd'))
rstate = np.random.RandomState(1178083)
random_unitcells = rstate.uniform(high=80, size=(98, 6)).astype(np.float64)
with DCDFile(DCD) as f_in, DCDFile(testname, 'w') as f_out:
header = f_in.header
header['is_periodic'] = True
f_out.write_header(**header)
for index, frame in enumerate(f_in):
f_out.write(xyz=frame.xyz, box=random_unitcells[index])
with DCDFile(testname) as test:
for index, frame in enumerate(test):
assert_array_almost_equal(frame.unitcell, random_unitcells[index])
|
MDAnalysis/mdanalysis
|
testsuite/MDAnalysisTests/formats/test_libdcd.py
|
Python
|
gpl-2.0
| 20,233
|
[
"CHARMM",
"MDAnalysis"
] |
a282f1a5340f2ccdb82e61a09159749750609a8ddc5341126c67c01bd225c003
|