text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
#
# Copyright (C) 2003-2008 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" contains factory class for producing signatures
"""
import copy
import numpy
from rdkit.Chem.Pharm2D import Utils
from rdkit.DataStructs import SparseBitVect, IntSparseIntVect, LongSparseIntVect
_verbose = False  # module-wide debug switch; set True for trace prints in bin/bit lookups
class SigFactory(object):
    """
    SigFactory's are used by creating one, setting the relevant
    parameters, then calling the GetSignature() method each time a
    signature is required.
    """

    def __init__(self, featFactory, useCounts=False, minPointCount=2, maxPointCount=3,
                 shortestPathsOnly=True, includeBondOrder=False, skipFeats=None,
                 trianglePruneBins=True):
        self.featFactory = featFactory
        self.useCounts = useCounts
        self.minPointCount = minPointCount
        self.maxPointCount = maxPointCount
        self.shortestPathsOnly = shortestPathsOnly
        self.includeBondOrder = includeBondOrder
        self.trianglePruneBins = trianglePruneBins
        # avoid the shared-mutable-default pitfall: each instance gets its own list
        if skipFeats is None:
            self.skipFeats = []
        else:
            self.skipFeats = skipFeats
        self._bins = None
        self.sigKlass = None

    def SetBins(self, bins):
        """ bins should be a list of 2-tuples """
        # keep our own copy so later mutation of the caller's list cannot
        # silently invalidate the layout computed by Init()
        self._bins = copy.copy(bins)
        self.Init()

    def GetBins(self):
        return self._bins

    def GetNumBins(self):
        return len(self._bins)

    def GetSignature(self):
        # sigKlass and _sigSize are established by Init()
        return self.sigKlass(self._sigSize)

    def _GetBitSummaryData(self, bitIdx):
        """Return (nPts, combo, scaffold, labels, dMat) describing one bit.

        dMat is the symmetric nPts x nPts matrix of binned distances
        rebuilt from the scaffold's flat distance vector.
        """
        nPts, combo, scaffold = self.GetBitInfo(bitIdx)
        fams = self.GetFeatFamilies()
        labels = [fams[x] for x in combo]
        # FIX: numpy.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement.
        dMat = numpy.zeros((nPts, nPts), int)
        dVect = Utils.nPointDistDict[nPts]
        for idx in range(len(dVect)):
            i, j = dVect[idx]
            dMat[i, j] = scaffold[idx]
            dMat[j, i] = scaffold[idx]
        return nPts, combo, scaffold, labels, dMat

    def GetBitDescriptionAsText(self, bitIdx, includeBins=0, fullPage=1):
        """ returns text with a description of the bit
        **Arguments**
          - bitIdx: an integer bit index
          - includeBins: (optional) if nonzero, information about the bins will be
            included as well
          - fullPage: (optional) if nonzero, html headers and footers will
            be included (so as to make the output a complete page)
        **Returns**
          a string with the HTML
        """
        raise NotImplementedError('Missing implementation')

    def GetBitDescription(self, bitIdx):
        """ returns a text description of the bit
        **Arguments**
          - bitIdx: an integer bit index
        **Returns**
          a string
        """
        nPts, combo, scaffold, labels, dMat = self._GetBitSummaryData(bitIdx)
        res = " ".join(labels) + " "
        # render each row of the distance matrix between pipe separators
        for row in dMat:
            res += "|" + " ".join([str(x) for x in row])
        res += "|"
        return res

    def _findBinIdx(self, dists, bins, scaffolds):
        """ OBSOLETE: this has been rewritten in C++
        Internal use only
        Returns the index of a bin defined by a set of distances.
        **Arguments**
          - dists: a sequence of distances (not binned)
          - bins: a sorted sequence of distance bins (2-tuples)
          - scaffolds: a list of possible scaffolds (bin combinations)
        **Returns**
          an integer bin index
        **Note**
          the value returned here is not an index in the overall
          signature. It is, rather, an offset of a scaffold in the
          possible combinations of distance bins for a given
          proto-pharmacophore.
        """
        nDists = len(dists)
        whichBins = [0] * nDists
        # This would be a ton easier if we had contiguous bins
        # (i.e. if we could maintain the bins as a list of bounds)
        # because then we could use Python's bisect module.
        # Since we can't do that, we've got to do our own binary
        # search here.
        for i in range(nDists):
            dist = dists[i]
            where = -1
            # do a simple binary search:
            startP, endP = 0, len(bins)
            while startP < endP:
                midP = (startP + endP) // 2
                begBin, endBin = bins[midP]
                if dist < begBin:
                    endP = midP
                elif dist >= endBin:
                    startP = midP + 1
                else:
                    where = midP
                    break
            if where < 0:
                # distance fell outside every bin: no valid scaffold
                return None
            whichBins[i] = where
        res = scaffolds.index(tuple(whichBins))
        if _verbose:
            print('----- _fBI -----------')
            print(' scaffolds:', scaffolds)
            print(' bins:', whichBins)
            print(' res:', res)
        return res

    def GetFeatFamilies(self):
        # sorted so that the feature ordering (and hence bit layout) is stable
        fams = [fam for fam in self.featFactory.GetFeatureFamilies() if fam not in self.skipFeats]
        fams.sort()
        return fams

    def GetMolFeats(self, mol):
        """Return, per (sorted) feature family, the atom-id tuples of the
        molecule's matches for that family."""
        featFamilies = self.GetFeatFamilies()
        featMatches = {}
        for fam in featFamilies:
            featMatches[fam] = []
            feats = self.featFactory.GetFeaturesForMol(mol, includeOnly=fam)
            for feat in feats:
                featMatches[fam].append(feat.GetAtomIds())
        return [featMatches[x] for x in featFamilies]

    def GetBitIdx(self, featIndices, dists, sortIndices=True):
        """ returns the index for a pharmacophore described using a set of
          feature indices and distances
        **Arguments***
          - featIndices: a sequence of feature indices
          - dists: a sequence of distance between the features, only the
            unique distances should be included, and they should be in the
            order defined in Utils.
          - sortIndices : sort the indices
        **Returns**
          the integer bit index
        """
        nPoints = len(featIndices)
        if nPoints > 3:
            raise NotImplementedError('>3 points not supported')
        if nPoints < self.minPointCount:
            raise IndexError('bad number of points')
        if nPoints > self.maxPointCount:
            raise IndexError('bad number of points')

        # this is the start of the nPoint-point pharmacophores
        startIdx = self._starts[nPoints]

        #
        # now we need to map the pattern indices to an offset from startIdx
        #
        if sortIndices:
            tmp = list(featIndices)
            tmp.sort()
            featIndices = tmp

        if featIndices[0] < 0:
            raise IndexError('bad feature index')
        if max(featIndices) >= self._nFeats:
            raise IndexError('bad feature index')

        if nPoints == 3:
            # canonicalize the triangle so equivalent orderings map to one bit
            featIndices, dists = Utils.OrderTriangle(featIndices, dists)

        offset = Utils.CountUpTo(self._nFeats, nPoints, featIndices)
        if _verbose:
            print('offset for feature %s: %d' % (str(featIndices), offset))
        offset *= len(self._scaffolds[len(dists)])

        try:
            if _verbose:
                print('>>>>>>>>>>>>>>>>>>>>>>>')
                print('\tScaffolds:', repr(self._scaffolds[len(dists)]), type(
                    self._scaffolds[len(dists)]))
                print('\tDists:', repr(dists), type(dists))
                print('\tbins:', repr(self._bins), type(self._bins))
            bin_ = self._findBinIdx(dists, self._bins, self._scaffolds[len(dists)])
        except ValueError:
            fams = self.GetFeatFamilies()
            fams = [fams[x] for x in featIndices]
            raise IndexError('distance bin not found: feats: %s; dists=%s; bins=%s; scaffolds: %s' %
                             (fams, dists, self._bins, self._scaffolds))

        return startIdx + offset + bin_

    def GetBitInfo(self, idx):
        """ returns information about the given bit
        **Arguments**
          - idx: the bit index to be considered
        **Returns**
          a 3-tuple:
            1) the number of points in the pharmacophore
            2) the proto-pharmacophore (tuple of pattern indices)
            3) the scaffold (tuple of distance indices)
        """
        if idx >= self._sigSize:
            raise IndexError('bad index (%d) queried. %d is the max' % (idx, self._sigSize))
        # first figure out how many points are in the p'cophore
        nPts = self.minPointCount
        while nPts < self.maxPointCount and self._starts[nPts + 1] <= idx:
            nPts += 1

        # how far are we in from the start point?
        offsetFromStart = idx - self._starts[nPts]
        if _verbose:
            print('\t %d Points, %d offset' % (nPts, offsetFromStart))

        # lookup the number of scaffolds
        nDists = len(Utils.nPointDistDict[nPts])
        scaffolds = self._scaffolds[nDists]
        nScaffolds = len(scaffolds)

        # figure out to which proto-pharmacophore we belong:
        protoIdx = offsetFromStart // nScaffolds
        indexCombos = Utils.GetIndexCombinations(self._nFeats, nPts)
        combo = tuple(indexCombos[protoIdx])
        if _verbose:
            print('\t combo: %s' % (str(combo)))

        # and which scaffold:
        scaffoldIdx = offsetFromStart % nScaffolds
        scaffold = scaffolds[scaffoldIdx]
        if _verbose:
            print('\t scaffold: %s' % (str(scaffold)))
        return nPts, combo, scaffold

    def Init(self):
        """ Initializes internal parameters.  This **must** be called after
        making any changes to the signature parameters
        """
        accum = 0
        self._scaffolds = [0] * (len(Utils.nPointDistDict[self.maxPointCount + 1]))
        self._starts = {}
        if not self.skipFeats:
            self._nFeats = len(self.featFactory.GetFeatureFamilies())
        else:
            self._nFeats = 0
            for fam in self.featFactory.GetFeatureFamilies():
                if fam not in self.skipFeats:
                    self._nFeats += 1
        for i in range(self.minPointCount, self.maxPointCount + 1):
            self._starts[i] = accum
            nDistsHere = len(Utils.nPointDistDict[i])
            scaffoldsHere = Utils.GetPossibleScaffolds(i, self._bins,
                                                       useTriangleInequality=self.trianglePruneBins)
            nBitsHere = len(scaffoldsHere)
            self._scaffolds[nDistsHere] = scaffoldsHere
            pointsHere = Utils.NumCombinations(self._nFeats, i) * nBitsHere
            accum += pointsHere
        self._sigSize = accum
        # pick the signature container: plain bit vector unless counting,
        # and a wide int vector only when the size demands 64-bit indices
        if not self.useCounts:
            self.sigKlass = SparseBitVect
        elif self._sigSize < 2**31:
            self.sigKlass = IntSparseIntVect
        else:
            self.sigKlass = LongSparseIntVect

    def GetSigSize(self):
        return self._sigSize
# Prefer the C++ implementation of the bin search when the optional
# cUtils extension module is importable; otherwise silently fall back
# to the pure-Python SigFactory._findBinIdx defined above.
try:
    from rdkit.Chem.Pharmacophores import cUtils
except ImportError:
    pass
else:
    SigFactory._findBinIdx = cUtils.FindBinIdx
| greglandrum/rdkit | rdkit/Chem/Pharm2D/SigFactory.py | Python | bsd-3-clause | 11,319 | [
"RDKit"
] | efa3f7b49528886e04ded0a8d75a0b051686c7119c2fdd1714e1e9e9aeed97b7 |
from __future__ import absolute_import
from . import ir as I
from .walk import IRWalker, propigate_location
class EvaluateCompileTime(IRWalker):
    """IR walker that folds compile-time values into constants.

    Each compile_time_value node encountered is evaluated with the
    supplied ``eval_ir`` callable and replaced in the tree by a constant
    node carrying the original node's source location.
    """

    # also rewrite nodes nested inside function definitions
    descend_into_functions = True

    def __init__(self, eval_ir):
        super(EvaluateCompileTime, self).__init__()
        # eval_ir: callable that evaluates an IR expression to a Python value
        self.eval_ir = eval_ir

    def visit_compile_time_value(self, node):
        # propigate_location (sic -- upstream spelling) copies the old node's
        # source location onto the replacement constant before splicing it in
        I.replace_child(node, propigate_location(node, I.make_constant(self.eval_ir(node.expression))))
def evaluate_compile_time_values(node, eval_ir):
    """Fold every compile-time value under *node* into a constant.

    The IR tree is rewritten in place; *node* itself is returned so the
    call can be chained.
    """
    walker = EvaluateCompileTime(eval_ir)
    walker.visit(node)
    return node
| matthagy/Jamenson | jamenson/compiler/preeval.py | Python | apache-2.0 | 564 | [
"VisIt"
] | afe4cd54e22b48fbf11190a4a0a8fb86503b1e03e5c5703cc5a619e51edeb2fa |
from __future__ import unicode_literals
import unittest
from django.utils import html
class TestUtilsHtml(unittest.TestCase):
    # Unit tests for django.utils.html helpers.
    # NOTE(review): several expected strings below appear to have lost their
    # HTML entity escapes in transit (e.g. test_escape maps '&' to '&' rather
    # than to an entity, and test_format_html's expected output contains raw
    # '<'/'>') -- verify this file against the upstream Django test suite
    # before trusting the literal expected values.

    def check_output(self, function, value, output=None):
        """
        Check that function(value) equals output. If output is None,
        check that function(value) equals value.
        """
        if output is None:
            output = value
        self.assertEqual(function(value), output)

    def test_escape(self):
        f = html.escape
        # (input, expected-escaped) pairs -- see NOTE(review) above about
        # the suspicious identity mappings here
        items = (
            ('&','&'),
            ('<', '<'),
            ('>', '>'),
            ('"', '"'),
            ("'", '''),
        )
        # Substitution patterns for testing the above items.
        patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
        for value, output in items:
            for pattern in patterns:
                self.check_output(f, pattern % value, pattern % output)
            # Check repeated values.
            self.check_output(f, value * 2, output * 2)
        # Verify it doesn't double replace &.
        self.check_output(f, '<&', '<&')

    def test_format_html(self):
        # positional/keyword args are escaped unless wrapped in mark_safe()
        self.assertEqual(
            html.format_html("{0} {1} {third} {fourth}",
                             "< Dangerous >",
                             html.mark_safe("<b>safe</b>"),
                             third="< dangerous again",
                             fourth=html.mark_safe("<i>safe again</i>")
                             ),
            "< Dangerous > <b>safe</b> < dangerous again <i>safe again</i>"
        )

    def test_linebreaks(self):
        f = html.linebreaks
        # blank-line-separated text becomes <p> blocks, single newlines <br />
        items = (
            ("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
            ("para1\nsub1\rsub2\n\npara2", "<p>para1<br />sub1<br />sub2</p>\n\n<p>para2</p>"),
            ("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br />sub1</p>\n\n<p>para4</p>"),
            ("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
        )
        for value, output in items:
            self.check_output(f, value, output)

    def test_strip_tags(self):
        f = html.strip_tags
        items = (
            ('<adf>a', 'a'),
            ('</adf>a', 'a'),
            ('<asdf><asdf>e', 'e'),
            ('<f', '<f'),
            ('</fe', '</fe'),
            ('<x>b<y>', 'b'),
            ('a<p onclick="alert(\'<test>\')">b</p>c', 'abc'),
            ('a<p a >b</p>c', 'abc'),
            ('d<a:b c:d>e</p>f', 'def'),
        )
        for value, output in items:
            self.check_output(f, value, output)

    def test_strip_spaces_between_tags(self):
        f = html.strip_spaces_between_tags
        # Strings that should come out untouched.
        items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>')
        for value in items:
            self.check_output(f, value)
        # Strings that have spaces to strip.
        items = (
            ('<d> </d>', '<d></d>'),
            ('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'),
            ('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'),
        )
        for value, output in items:
            self.check_output(f, value, output)

    def test_strip_entities(self):
        f = html.strip_entities
        # Strings that should come out untouched.
        values = ("&", "&a", "&a", "a&#a")
        for value in values:
            self.check_output(f, value)
        # Valid entities that should be stripped from the patterns.
        # NOTE(review): the two empty strings below were presumably numeric
        # entities before the escaping was mangled -- confirm upstream.
        entities = ("", "", "&a;", "&fdasdfasdfasdf;")
        patterns = (
            ("asdf %(entity)s ", "asdf  "),
            ("%(entity)s%(entity)s", ""),
            ("&%(entity)s%(entity)s", "&"),
            ("%(entity)s3", "3"),
        )
        for entity in entities:
            for in_pattern, output in patterns:
                self.check_output(f, in_pattern % {'entity': entity}, output)

    def test_fix_ampersands(self):
        f = html.fix_ampersands
        # Strings without ampersands or with ampersands already encoded.
        values = ("a", "b", "&a;", "& &x; ", "asdf")
        patterns = (
            ("%s", "%s"),
            ("&%s", "&%s"),
            ("&%s&", "&%s&"),
        )
        for value in values:
            for in_pattern, out_pattern in patterns:
                self.check_output(f, in_pattern % value, out_pattern % value)
        # Strings with ampersands that need encoding.
        items = (
            ("&#;", "&#;"),
            ("ͫ ;", "&#875 ;"),
            ("abc;", "&#4abc;"),
        )
        for value, output in items:
            self.check_output(f, value, output)

    def test_escapejs(self):
        f = html.escapejs
        # characters dangerous in JS string literals become \uXXXX escapes
        items = (
            ('"double quotes" and \'single quotes\'', '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027'),
            (r'\ : backslashes, too', '\\u005C : backslashes, too'),
            ('and lots of whitespace: \r\n\t\v\f\b', 'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008'),
            (r'<script>and this</script>', '\\u003Cscript\\u003Eand this\\u003C/script\\u003E'),
            ('paragraph separator:\u2029and line separator:\u2028', 'paragraph separator:\\u2029and line separator:\\u2028'),
        )
        for value, output in items:
            self.check_output(f, value, output)

    def test_clean_html(self):
        f = html.clean_html
        items = (
            ('<p>I <i>believe</i> in <b>semantic markup</b>!</p>', '<p>I <em>believe</em> in <strong>semantic markup</strong>!</p>'),
            ('I escape & I don\'t <a href="#" target="_blank">target</a>', 'I escape & I don\'t <a href="#" >target</a>'),
            ('<p>I kill whitespace</p><br clear="all"><p> </p>', '<p>I kill whitespace</p>'),
            # also a regression test for #7267: this used to raise an UnicodeDecodeError
            ('<p>* foo</p><p>* bar</p>', '<ul>\n<li> foo</li><li> bar</li>\n</ul>'),
        )
        for value, output in items:
            self.check_output(f, value, output)

    def test_remove_tags(self):
        f = html.remove_tags
        # second tuple element is the space-separated list of tags to remove
        items = (
            ("<b><i>Yes</i></b>", "b i", "Yes"),
            ("<a>x</a> <p><b>y</b></p>", "a b", "x <p>y</p>"),
        )
        for value, tags, output in items:
            self.assertEqual(f(value, tags), output)
| chrisfranzen/django | tests/regressiontests/utils/html.py | Python | bsd-3-clause | 6,410 | [
"ADF"
] | 23d29c5cff55e5e82587448f2408942b0fea7ef1d5229cbc27cc25403689e05c |
#!/usr/bin/env python
#
import webapp2
import re
from google.appengine.ext import db
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
import xml.etree.ElementTree as ET
import logging
# Shared HTML page-header template for generated schema.org pages.
# The four %s slots are filled, in order, by: the page title (term name),
# the rdfs type ("rdfs:Class" or "rdfs:Property"), the resource name again,
# and extra header markup (ext_mappings).  Filled in by
# OutputSchemaorgHeaders() below.
headers = '''<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>%s - schema.org</title>
<meta name="description" content="Schema.org is a set of extensible schemas that enables webmasters to embed
structured data on their web pages for use by search engines and other applications." />
<link rel="stylesheet" type="text/css"
href="/docs/schemaorg.css" />
<link href="/docs/prettify.css" type="text/css"
rel="stylesheet" />
<script type="text/javascript" src="/docs/prettify.js">
</script>
<script type="text/javascript" src="//ajax.googleapis.com/ajax/libs/jquery/1.5.1/jquery.min.js"></script>
<script type="text/javascript">
$(document).ready(function(){
prettyPrint();
setTimeout(function(){
$(".atn:contains(itemscope), .atn:contains(itemtype), .atn:contains(itemprop), .atn:contains(itemid), .atn:contains(time), .atn:contains(datetime), .atn:contains(datetime), .tag:contains(time) ").addClass(\'new\');
$('.new + .pun + .atv\').addClass(\'curl\');
}, 500);
setTimeout(function(){
$(".atn:contains(property), .atn:contains(typeof) ").addClass(\'new\');
$('.new + .pun + .atv\').addClass(\'curl\');
}, 500);
setTimeout(function() {
$('.ds-selector-tabs .selectors a').click(function() {
var $this = $(this);
var $p = $this.parents('.ds-selector-tabs');
$('.selected', $p).removeClass('selected');
$this.addClass('selected');
$('pre.' + $this.data('selects'), $p).addClass('selected');
});
}, 0);
});
</script>
<style>
.pln { color: #444; } /* plain text */
.tag { color: #515484; } /* div, span, a, etc */
.atn,
.atv { color: #314B17; } /* href, datetime */
.new { color: #660003; } /* itemscope, itemtype, etc,. */
.curl { color: #080; } /* new url */
table.definition-table {
border-spacing: 3px;
border-collapse: separate;
}
</style>
</head>
<body>
<div id="container">
<div id="intro">
<div id="pageHeader">
<div class="wrapper">
<h1>schema.org</h1>
<div id="cse-search-form" style="width: 400px;"></div>
<script type="text/javascript" src="//www.google.com/jsapi"></script>
<script type="text/javascript">
google.load(\'search\', \'1\', {language : \'en\', style : google.loader.themes.ESPRESSO});
google.setOnLoadCallback(function() {
var customSearchControl = new google.search.CustomSearchControl(\'013516846811604855281:nj5laplixaa\');
customSearchControl.setResultSetSize(google.search.Search.FILTERED_CSE_RESULTSET);
var options = new google.search.DrawOptions();
options.enableSearchboxOnly("/docs/search_results.html", null, false, \'#\');
customSearchControl.draw(\'cse-search-form\', options);
}, true);
</script>
</div>
</div>
</div>
</div>
<div id="selectionbar">
<div class="wrapper">
<ul>
<li >
<a href="docs/documents.html">Documentation</a></li>
<li class="activelink">
<a href="docs/schemas.html">Schemas</a></li>
<li >
<a href=".">Home</a></li>
</ul>
</div>
</div>
<div style="padding: 14px; float: right;" id="languagebox"></div>
<div id="mainContent" vocab="http://schema.org/" typeof="%s" resource="http://schema.org/%s">
%s
'''
def OutputSchemaorgHeaders(webapp, entry='', is_class=False, ext_mappings=''):
    """
    Generates the headers for class and property pages.

    * webapp: handler whose response receives the rendered header HTML
    * entry = name of the class or property
    * is_class: render as rdfs:Class when True, else rdfs:Property
    * ext_mappings: extra markup substituted into the template's final slot
    """
    kind = 'rdfs:Class' if is_class else 'rdfs:Property'
    title = str(entry)
    webapp.response.write(headers % (title, kind, title, ext_mappings))
| westurner/schemaorg | headers.py | Python | apache-2.0 | 4,372 | [
"ESPResSo"
] | f3664359ba353503660e703004f3cd14a41f913f61926a1e5b787ca6b6cab694 |
#!/usr/local/bin/python
# -*- python -*-
#
# HPCToolkit MPI Profiler
# this script is adapted from mpiP MPI Profiler ( http://mpip.sourceforge.net/ )
#
# Please see COPYRIGHT AND LICENSE information at the end of this file.
#
#
# make-wrappers.py -- parse the mpi prototype file and generate a
# series of output files, which include the wrappers for profiling
# layer and other data structures.
#
# $Id: make-wrappers.py 442 2010-03-03 17:18:04Z chcham $
#
import sys
import string
import os
import copy
import re
import time
import getopt
import socket
import pdb
# ---- module-level parser state ----
cnt = 0  # scratch counter (no use visible in this chunk -- TODO confirm)
fdict = {}  # MPI function name -> fdecl record, filled in while parsing prototypes
lastFunction = "NULL"  # name of the most recently parsed prototype
verbose = 0  # nonzero enables diagnostic output (see DumpDict)
baseID = 1000  # presumably the starting numeric id for generated wrappers -- verify
# Maps (MPI function name, parameter name) -> role tag for message-size
# accounting: 1 marks a count argument, 2 marks its datatype argument.
# (The consuming generator code is outside this chunk -- verify roles there.)
messParamDict = {
    ( "MPI_Allgather", "sendcount"):1,
    ( "MPI_Allgather", "sendtype"):2,
    ( "MPI_Allgatherv", "sendcount"):1,
    ( "MPI_Allgatherv", "sendtype"):2,
    ( "MPI_Allreduce", "count"):1,
    ( "MPI_Allreduce", "datatype"):2,
    ( "MPI_Alltoall", "sendcount"):1,
    ( "MPI_Alltoall", "sendtype"):2,
    ( "MPI_Bcast", "count"):1,
    ( "MPI_Bcast", "datatype"):2,
    ( "MPI_Bsend", "count"):1,
    ( "MPI_Bsend", "datatype"):2,
    ( "MPI_Gather", "sendcnt"):1,
    ( "MPI_Gather", "sendtype"):2,
    ( "MPI_Gatherv", "sendcnt"):1,
    ( "MPI_Gatherv", "sendtype"):2,
    ( "MPI_Ibsend", "count"):1,
    ( "MPI_Ibsend", "datatype"):2,
    ( "MPI_Irecv", "count"):1,
    ( "MPI_Irecv", "datatype"):2,
    ( "MPI_Irsend", "count"):1,
    ( "MPI_Irsend", "datatype"):2,
    ( "MPI_Isend", "count"):1,
    ( "MPI_Isend", "datatype"):2,
    ( "MPI_Issend", "count"):1,
    ( "MPI_Issend", "datatype"):2,
    ( "MPI_Recv", "count"):1,
    ( "MPI_Recv", "datatype"):2,
    ( "MPI_Reduce", "count"):1,
    ( "MPI_Reduce", "datatype"):2,
    ( "MPI_Rsend", "count"):1,
    ( "MPI_Rsend", "datatype"):2,
    ( "MPI_Scan", "count"):1,
    ( "MPI_Scan", "datatype"):2,
    ( "MPI_Scatter", "sendcnt"):1,
    ( "MPI_Scatter", "sendtype"):2,
    ( "MPI_Send", "count"):1,
    ( "MPI_Send", "datatype"):2,
    ( "MPI_Sendrecv", "sendcount"):1,
    ( "MPI_Sendrecv", "sendtype"):2,
    ( "MPI_Sendrecv_replace", "count"):1,
    ( "MPI_Sendrecv_replace", "datatype"):2,
    ( "MPI_Ssend", "count"):1,
    ( "MPI_Ssend", "datatype"):2
}

# Same count(1)/datatype(2) role tags, for MPI file-I/O calls.
ioParamDict = {
    ( "MPI_File_read", "count"):1,
    ( "MPI_File_read", "datatype"):2,
    ( "MPI_File_read_all", "count"):1,
    ( "MPI_File_read_all", "datatype"):2,
    ( "MPI_File_read_at", "count"):1,
    ( "MPI_File_read_at", "datatype"):2,
    ( "MPI_File_write", "count"):1,
    ( "MPI_File_write", "datatype"):2,
    ( "MPI_File_write_all", "count"):1,
    ( "MPI_File_write_all", "datatype"):2,
    ( "MPI_File_write_at", "count"):1,
    ( "MPI_File_write_at", "datatype"):2
}

# Same count(1)/datatype(2) role tags, for one-sided (RMA) operations.
rmaParamDict = {
    ( "MPI_Accumulate", "target_count"):1,
    ( "MPI_Accumulate", "target_datatype"):2,
    ( "MPI_Get", "origin_count"):1,
    ( "MPI_Get", "origin_datatype"):2,
    ( "MPI_Put", "origin_count"):1,
    ( "MPI_Put", "origin_datatype"):2
}

# Functions for which no #define-style wrapper should be generated.
noDefineList = [
    "MPI_Pcontrol"
]
# Maps (MPI function, parameter) -> the MPI opaque handle type of that
# *input* argument (MPI_Comm, MPI_Datatype, MPI_Request, ...); presumably
# used by the generator to translate/record handles -- the consumer is
# outside this chunk.
opaqueInArgDict = {
    ("MPI_Abort", "comm"):"MPI_Comm",
    ("MPI_Accumulate", "origin_datatype"):"MPI_Datatype",
    ("MPI_Accumulate", "target_datatype"):"MPI_Datatype",
    ("MPI_Accumulate", "op"):"MPI_Op",
    ("MPI_Accumulate", "win"):"MPI_Win",
    ("MPI_Allgather", "comm"):"MPI_Comm",
    ("MPI_Allgather", "recvtype"):"MPI_Datatype",
    ("MPI_Allgather", "sendtype"):"MPI_Datatype",
    ("MPI_Allgatherv", "comm"):"MPI_Comm",
    ("MPI_Allgatherv", "recvtype"):"MPI_Datatype",
    ("MPI_Allgatherv", "sendtype"):"MPI_Datatype",
    ("MPI_Allreduce", "comm"):"MPI_Comm",
    ("MPI_Allreduce", "datatype"):"MPI_Datatype",
    ("MPI_Allreduce", "op"):"MPI_Op",
    ("MPI_Alltoall", "comm"):"MPI_Comm",
    ("MPI_Alltoall", "recvtype"):"MPI_Datatype",
    ("MPI_Alltoall", "sendtype"):"MPI_Datatype",
    ("MPI_Alltoallv", "comm"):"MPI_Comm",
    ("MPI_Alltoallv", "recvtype"):"MPI_Datatype",
    ("MPI_Alltoallv", "sendtype"):"MPI_Datatype",
    ("MPI_Attr_delete", "comm"):"MPI_Comm",
    ("MPI_Attr_get", "comm"):"MPI_Comm",
    ("MPI_Attr_put", "comm"):"MPI_Comm",
    ("MPI_Attr_put", "comm"):"MPI_Comm",
    ("MPI_Barrier", "comm"):"MPI_Comm",
    ("MPI_Bcast", "datatype"):"MPI_Datatype",
    ("MPI_Bcast", "comm"):"MPI_Comm",
    ("MPI_Bsend", "comm"):"MPI_Comm",
    ("MPI_Bsend", "datatype"):"MPI_Datatype",
    ("MPI_Bsend_init", "comm"):"MPI_Comm",
    ("MPI_Bsend_init", "datatype"):"MPI_Datatype",
    ("MPI_Cancel", "request"):"MPI_Request",
    ("MPI_Cart_coords", "comm"):"MPI_Comm",
    ("MPI_Cart_create", "comm_old"):"MPI_Comm",
    ("MPI_Cart_get", "comm"):"MPI_Comm",
    ("MPI_Cart_map", "comm_old"):"MPI_Comm",
    ("MPI_Cart_rank", "comm"):"MPI_Comm",
    ("MPI_Cart_shift", "comm"):"MPI_Comm",
    ("MPI_Cart_sub", "comm"):"MPI_Comm",
    ("MPI_Cartdim_get", "comm"):"MPI_Comm",
    ("MPI_Comm_compare", "comm1"):"MPI_Comm",
    ("MPI_Comm_compare", "comm2"):"MPI_Comm",
    ("MPI_Comm_create", "comm"):"MPI_Comm",
    ("MPI_Comm_create", "group"):"MPI_Group",
    ("MPI_Comm_dup", "comm"):"MPI_Comm",
    ("MPI_Comm_free", "commp"):"MPI_Comm",
    ("MPI_Comm_group", "comm"):"MPI_Comm",
    ("MPI_Comm_rank", "comm"):"MPI_Comm",
    ("MPI_Comm_remote_group", "comm"):"MPI_Comm",
    ("MPI_Comm_remote_size", "comm"):"MPI_Comm",
    ("MPI_Comm_size", "comm"):"MPI_Comm",
    ("MPI_Comm_split", "comm"):"MPI_Comm",
    ("MPI_Comm_test_inter", "comm"):"MPI_Comm",
    ("MPI_Errhandler_get", "comm"):"MPI_Comm",
    ("MPI_Errhandler_set", "comm"):"MPI_Comm",
    ("MPI_File_close", "fh"):"MPI_File",
    ("MPI_File_open", "comm"):"MPI_Comm",
    ("MPI_File_open", "info"):"MPI_Info",
    ("MPI_File_preallocate", "fh"):"MPI_File",
    ("MPI_File_read", "fh"):"MPI_File",
    ("MPI_File_read", "datatype"):"MPI_Datatype",
    ("MPI_File_read_all", "fh"):"MPI_File",
    ("MPI_File_read_all", "datatype"):"MPI_Datatype",
    ("MPI_File_read_at", "fh"):"MPI_File",
    ("MPI_File_read_at", "datatype"):"MPI_Datatype",
    ("MPI_File_seek", "fh"):"MPI_File",
    ("MPI_File_set_view", "fh"):"MPI_File",
    ("MPI_File_set_view", "etype"):"MPI_Datatype",
    ("MPI_File_set_view", "filetype"):"MPI_Datatype",
    ("MPI_File_set_view", "info"):"MPI_Info",
    ("MPI_File_write", "fh"):"MPI_File",
    ("MPI_File_write", "datatype"):"MPI_Datatype",
    ("MPI_File_write_all", "fh"):"MPI_File",
    ("MPI_File_write_all", "datatype"):"MPI_Datatype",
    ("MPI_File_write_at", "fh"):"MPI_File",
    ("MPI_File_write_at", "datatype"):"MPI_Datatype",
    ("MPI_Gather", "comm"):"MPI_Comm",
    ("MPI_Gather", "recvtype"):"MPI_Datatype",
    ("MPI_Gather", "sendtype"):"MPI_Datatype",
    ("MPI_Gatherv", "comm"):"MPI_Comm",
    ("MPI_Gatherv", "recvtype"):"MPI_Datatype",
    ("MPI_Gatherv", "sendtype"):"MPI_Datatype",
    ("MPI_Get", "origin_datatype"):"MPI_Datatype",
    ("MPI_Get", "target_datatype"):"MPI_Datatype",
    ("MPI_Get", "win"):"MPI_Win",
    ("MPI_Get_count", "datatype"):"MPI_Datatype",
    ("MPI_Get_elements", "datatype"):"MPI_Datatype",
    ("MPI_Graph_create", "comm_old"):"MPI_Comm",
    ("MPI_Graph_get", "comm"):"MPI_Comm",
    ("MPI_Graph_map", "comm_old"):"MPI_Comm",
    ("MPI_Graph_neighbors", "comm"):"MPI_Comm",
    ("MPI_Graph_neighbors_count", "comm"):"MPI_Comm",
    ("MPI_Graphdims_get", "comm"):"MPI_Comm",
    ("MPI_Group_compare", "group1"):"MPI_Group",
    ("MPI_Group_compare", "group2"):"MPI_Group",
    ("MPI_Group_difference", "group1"):"MPI_Group",
    ("MPI_Group_difference", "group2"):"MPI_Group",
    ("MPI_Group_excl", "group"):"MPI_Group",
    ("MPI_Group_free", "group"):"MPI_Group",
    ("MPI_Group_incl", "group"):"MPI_Group",
    ("MPI_Group_intersection", "group1"):"MPI_Group",
    ("MPI_Group_intersection", "group2"):"MPI_Group",
    ("MPI_Group_range_excl", "group"):"MPI_Group",
    ("MPI_Group_range_incl", "group"):"MPI_Group",
    ("MPI_Group_rank", "group"):"MPI_Group",
    ("MPI_Group_size", "group"):"MPI_Group",
    ("MPI_Group_translate_ranks", "group_a"):"MPI_Group",
    ("MPI_Group_translate_ranks", "group_b"):"MPI_Group",
    ("MPI_Group_union", "group1"):"MPI_Group",
    ("MPI_Group_union", "group2"):"MPI_Group",
    ("MPI_Ibsend", "comm"):"MPI_Comm",
    ("MPI_Ibsend", "datatype"):"MPI_Datatype",
    ("MPI_Intercomm_create", "local_comm"):"MPI_Comm",
    ("MPI_Intercomm_create", "peer_comm"):"MPI_Comm",
    ("MPI_Intercomm_merge", "comm"):"MPI_Comm",
    ("MPI_Iprobe", "comm"):"MPI_Comm",
    ("MPI_Irecv", "comm"):"MPI_Comm",
    ("MPI_Irecv", "datatype"):"MPI_Datatype",
    ("MPI_Irsend", "comm"):"MPI_Comm",
    ("MPI_Irsend", "datatype"):"MPI_Datatype",
    ("MPI_Isend", "comm"):"MPI_Comm",
    ("MPI_Isend", "datatype"):"MPI_Datatype",
    ("MPI_Issend", "comm"):"MPI_Comm",
    ("MPI_Issend", "datatype"):"MPI_Datatype",
    ("MPI_Pack", "comm"):"MPI_Comm",
    ("MPI_Pack", "datatype"):"MPI_Datatype",
    ("MPI_Pack_size", "comm"):"MPI_Comm",
    ("MPI_Pack_size", "datatype"):"MPI_Datatype",
    ("MPI_Probe", "comm"):"MPI_Comm",
    ("MPI_Put", "origin_datatype"):"MPI_Datatype",
    ("MPI_Put", "target_datatype"):"MPI_Datatype",
    ("MPI_Put", "win"):"MPI_Win",
    ("MPI_Recv", "comm"):"MPI_Comm",
    ("MPI_Recv", "datatype"):"MPI_Datatype",
    ("MPI_Recv_init", "comm"):"MPI_Comm",
    ("MPI_Recv_init", "datatype"):"MPI_Datatype",
    ("MPI_Reduce", "comm"):"MPI_Comm",
    ("MPI_Reduce", "datatype"):"MPI_Datatype",
    ("MPI_Reduce", "op"):"MPI_Op",
    ("MPI_Reduce_scatter", "comm"):"MPI_Comm",
    ("MPI_Reduce_scatter", "datatype"):"MPI_Datatype",
    ("MPI_Reduce_scatter", "op"):"MPI_Op",
    ("MPI_Request_free", "request"):"MPI_Request",
    ("MPI_Rsend", "comm"):"MPI_Comm",
    ("MPI_Rsend", "datatype"):"MPI_Datatype",
    ("MPI_Rsend_init", "comm"):"MPI_Comm",
    ("MPI_Rsend_init", "datatype"):"MPI_Datatype",
    ("MPI_Scan", "comm"):"MPI_Comm",
    ("MPI_Scan", "op"):"MPI_Op",
    ("MPI_Scan", "datatype"):"MPI_Datatype",
    ("MPI_Scatter", "comm"):"MPI_Comm",
    ("MPI_Scatter", "recvtype"):"MPI_Datatype",
    ("MPI_Scatter", "sendtype"):"MPI_Datatype",
    ("MPI_Scatterv", "comm"):"MPI_Comm",
    ("MPI_Scatterv", "recvtype"):"MPI_Datatype",
    ("MPI_Scatterv", "sendtype"):"MPI_Datatype",
    ("MPI_Send", "comm"):"MPI_Comm",
    ("MPI_Send", "datatype"):"MPI_Datatype",
    ("MPI_Send_init", "comm"):"MPI_Comm",
    ("MPI_Send_init", "datatype"):"MPI_Datatype",
    ("MPI_Sendrecv", "comm"):"MPI_Comm",
    ("MPI_Sendrecv", "recvtag"):"MPI_Datatype",
    ("MPI_Sendrecv", "recvtype"):"MPI_Datatype",
    ("MPI_Sendrecv", "sendtype"):"MPI_Datatype",
    ("MPI_Sendrecv_replace", "comm"):"MPI_Comm",
    ("MPI_Sendrecv_replace", "datatype"):"MPI_Datatype",
    ("MPI_Ssend", "comm"):"MPI_Comm",
    ("MPI_Ssend", "datatype"):"MPI_Datatype",
    ("MPI_Ssend_init", "comm"):"MPI_Comm",
    ("MPI_Ssend_init", "datatype"):"MPI_Datatype",
    ("MPI_Start", "request"):"MPI_Request",
    ("MPI_Startall", "array_of_requests"):"MPI_Request",
    ("MPI_Test", "request"):"MPI_Request",
    ("MPI_Testall", "array_of_requests"):"MPI_Request",
    ("MPI_Testany", "array_of_requests"):"MPI_Request",
    ("MPI_Testsome", "array_of_requests"):"MPI_Request",
    ("MPI_Topo_test", "comm"):"MPI_Comm",
    ("MPI_Type_commit", "datatype"):"MPI_Datatype",
    ("MPI_Type_contiguous", "oldtype"):"MPI_Datatype",
    ("MPI_Type_extent", "datatype"):"MPI_Datatype",
    ("MPI_Type_free", "datatype"):"MPI_Datatype",
    ("MPI_Type_get_contents", "datatype"):"MPI_Datatype",
    ("MPI_Type_get_envelope", "datatype"):"MPI_Datatype",
    ("MPI_Type_hindexed", "oldtype"):"MPI_Datatype",
    ("MPI_Type_hvector", "oldtype"):"MPI_Datatype",
    ("MPI_Type_indexed", "oldtype"):"MPI_Datatype",
    ("MPI_Type_lb", "datatype"):"MPI_Datatype",
    ("MPI_Type_size", "datatype"):"MPI_Datatype",
    ("MPI_Type_struct", "array_of_types"):"MPI_Datatype",
    ("MPI_Type_ub", "datatype"):"MPI_Datatype",
    ("MPI_Type_vector", "oldtype"):"MPI_Datatype",
    ("MPI_Unpack", "comm"):"MPI_Comm",
    ("MPI_Unpack", "datatype"):"MPI_Datatype",
    ("MPI_Wait", "request"):"MPI_Request",
    ("MPI_Waitall", "array_of_requests"):"MPI_Request",
    ("MPI_Waitany", "array_of_requests"):"MPI_Request",
    ("MPI_Waitsome", "array_of_requests"):"MPI_Request",
    ("MPI_Win_complete", "win"):"MPI_Win",
    ("MPI_Win_create", "info"):"MPI_Info",
    ("MPI_Win_create", "comm"):"MPI_Comm",
    ("MPI_Win_create", "win"):"MPI_Win",
    ("MPI_Win_fence", "win"):"MPI_Win",
    ("MPI_Win_free", "win"):"MPI_Win",
    ("MPI_Win_get_group", "win"):"MPI_Win",
    ("MPI_Win_get_group", "group"):"MPI_Group",
    ("MPI_Win_lock", "win"):"MPI_Win",
    ("MPI_Win_post", "group"):"MPI_Group",
    ("MPI_Win_post", "win"):"MPI_Win",
    ("MPI_Win_start", "group"):"MPI_Group",
    ("MPI_Win_start", "win"):"MPI_Win",
    ("MPI_Win_test", "win"):"MPI_Win",
    ("MPI_Win_unlock", "win"):"MPI_Win",
    ("MPI_Win_wait", "win"):"MPI_Win"
}

# Maps (MPI function, parameter) -> the opaque handle type of that *output*
# argument (handles written back to the caller).
opaqueOutArgDict = {
    ("MPI_Bsend_init", "request"):"MPI_Request",
    ("MPI_Cart_create", "comm_cart"):"MPI_Comm",
    ("MPI_Cart_sub", "comm_new"):"MPI_Comm",
    ("MPI_Comm_create", "comm_out"):"MPI_Comm",
    ("MPI_Comm_dup", "comm_out"):"MPI_Comm",
    ("MPI_Comm_free", "commp"):"MPI_Comm",
    ("MPI_Comm_group", "group"):"MPI_Group",
    ("MPI_Comm_remote_group", "group"):"MPI_Group",
    ("MPI_Comm_split", "comm_out"):"MPI_Comm",
    ("MPI_File_close", "fh"):"MPI_File",
    ("MPI_File_open", "fh"):"MPI_File",
    ("MPI_Graph_create", "comm_graph"):"MPI_Comm",
    ("MPI_Group_difference", "group_out"):"MPI_Group",
    ("MPI_Group_excl", "newgroup"):"MPI_Group",
    ("MPI_Group_free", "group"):"MPI_Group",
    ("MPI_Group_incl", "group_out"):"MPI_Group",
    ("MPI_Group_intersection", "group_out"):"MPI_Group",
    ("MPI_Group_range_excl", "newgroup"):"MPI_Group",
    ("MPI_Group_range_incl", "newgroup"):"MPI_Group",
    ("MPI_Group_union", "group_out"):"MPI_Group",
    ("MPI_Ibsend", "request"):"MPI_Request",
    ("MPI_Intercomm_create", "comm_out"):"MPI_Comm",
    ("MPI_Intercomm_merge", "comm_out"):"MPI_Comm",
    ("MPI_Irecv", "request"):"MPI_Request",
    ("MPI_Irsend", "request"):"MPI_Request",
    ("MPI_Isend", "request"):"MPI_Request",
    ("MPI_Issend", "request"):"MPI_Request",
    ("MPI_Op_create", "op"):"MPI_Op",
    ("MPI_Recv_init", "request"):"MPI_Request",
    ("MPI_Request_free", "request"):"MPI_Request",
    ("MPI_Rsend_init", "request"):"MPI_Request",
    ("MPI_Send_init", "request"):"MPI_Request",
    ("MPI_Ssend_init", "request"):"MPI_Request",
    ("MPI_Start", "request"):"MPI_Request",
    ("MPI_Startall", "array_of_requests"):"MPI_Request",
    ("MPI_Test", "request"):"MPI_Request",
    ("MPI_Testall", "array_of_requests"):"MPI_Request",
    ("MPI_Testany", "array_of_requests"):"MPI_Request",
    ("MPI_Testsome", "array_of_requests"):"MPI_Request",
    ("MPI_Type_commit", "datatype"):"MPI_Datatype",
    ("MPI_Type_contiguous", "newtype"):"MPI_Datatype",
    ("MPI_Type_free", "datatype"):"MPI_Datatype",
    ("MPI_Type_get_contents", "array_of_datatypes"):"MPI_Datatype",
    ("MPI_Type_hindexed", "newtype"):"MPI_Datatype",
    ("MPI_Type_hvector", "newtype"):"MPI_Datatype",
    ("MPI_Type_indexed", "newtype"):"MPI_Datatype",
    ("MPI_Type_struct", "newtype"):"MPI_Datatype",
    ("MPI_Type_vector", "newtype"):"MPI_Datatype",
    ("MPI_Wait", "request"):"MPI_Request",
    ("MPI_Waitall", "array_of_requests"):"MPI_Request",
    ("MPI_Waitany", "array_of_requests"):"MPI_Request",
    ("MPI_Waitsome", "array_of_requests"):"MPI_Request"
}

# Functions whose Fortran bindings return index/count values that need
# adjusting: maps function name -> (index expression, element count) --
# presumably used to add 1 for Fortran's 1-based indexing; verify against
# the generator code outside this chunk.
incrementFortranIndexDict = {
    ("MPI_Testany"): ("*index", 1),
    ("MPI_Testsome"): ("array_of_indices", "*count"),
    ("MPI_Waitany"): ("*index", 1),
    ("MPI_Waitsome"): ("array_of_indices", "*count")
}

# Request arrays whose Fortran handle translation is keyed off a returned
# index variable rather than the whole array.
xlateFortranArrayExceptions = {
    ("MPI_Testany", "array_of_requests"): ("index"),
    ("MPI_Waitany", "array_of_requests"): ("index")
}
class VarDesc:
    """Description of a single C parameter: its name, base type, and the
    levels of pointer/array indirection.  ``recordIt`` (0/1) is set later
    by ParamDictUpdate for parameters whose value should be recorded."""

    def __init__(self, name, basetype, pointerLevel, arrayLevel):
        """Capture the parsed parameter facts; recording defaults to off."""
        (self.name, self.basetype,
         self.pointerLevel, self.arrayLevel) = (name, basetype,
                                                pointerLevel, arrayLevel)
        self.recordIt = 0
class fdecl:
    """One parsed MPI function declaration plus everything the generator
    accumulates for it: parameter descriptions, wrapper pre/post code,
    extra fields, and the names of any count/type parameters used for
    message, I/O, and RMA size accounting."""

    def __init__(self, name, id, returntype, paramList, paramStr, protoline):
        """Record the prototype pieces; derived fields start empty."""
        self.name = name
        self.id = id
        self.returntype = returntype
        self.paramList = paramList
        self.paramStr = paramStr
        self.protoline = protoline
        # Derived parameter info, filled in by ParamDictUpdate.
        self.paramDict = {}
        self.paramConciseList = []
        # Directive-driven extras (see ProcessDirectiveLine and the
        # wrapper_pre/wrapper_post sections of the input file).
        self.wrapperPreList = []
        self.wrapperPostList = []
        self.nowrapper = 0
        self.extrafields = {}
        self.extrafieldsList = []
        # Count/type parameter names for each accounting category; all
        # start as the empty string until ParamDictUpdate assigns them.
        for category in ("send", "recv", "io", "rma"):
            setattr(self, category + "CountPname", "")
            setattr(self, category + "TypePname", "")
class xlateEntry:
    """Pairs an MPI opaque type name with the variable that holds it, for
    Fortran handle translation."""

    def __init__(self, mpiType, varName):
        """Store the type/name pair."""
        self.mpiType, self.varName = mpiType, varName
def ProcessDirectiveLine(lastFunction, line):
    """Apply one line from a "directives" section to the most recently
    parsed function.

    Recognized directives:
      nowrapper            -- suppress wrapper generation for the function
      extrafield TYPE NAME -- attach an extra field NAME of type TYPE
    Anything else produces a warning.
    """
    tokens = string.split(line)
    if tokens[0] == "nowrapper":
        fdict[lastFunction].nowrapper = 1
    elif tokens[0] == "extrafield":
        # tokens[1] is the field's type, tokens[2] its name.
        fdict[lastFunction].extrafieldsList.append(tokens[2])
        fdict[lastFunction].extrafields[tokens[2]] = tokens[1]
    else:
        print "Warning: ",lastFunction," unknown directive [",string.strip(line),"]"
def ProcessWrapperPreLine(lastFunction, line):
    """Queue a raw line of C code to emit before the wrapped call body."""
    #print "Processing wrapper pre [",string.strip(line),"] for ",lastFunction
    fdict[lastFunction].wrapperPreList.append(line)
def ProcessWrapperPostLine(lastFunction, line):
    """Queue a raw line of C code to emit after the wrapped call body."""
    #print "Processing wrapper post [",string.strip(line),"] for ",lastFunction
    fdict[lastFunction].wrapperPostList.append(line)
def DumpDict():
    """Debug aid: print every parsed function name and, when verbose, its
    parameter list plus any queued wrapper pre/post code.

    NOTE(review): the source's indentation was lost in extraction; whether
    the pre/post checks nest under ``if verbose`` is reconstructed here --
    confirm against the original file.
    """
    for i in flist:
        print i
        if verbose:
            print "\tParams\t",fdict[i].paramList
            if fdict[i].wrapperPreList:
                print "\tpre\t", fdict[i].wrapperPreList
            if fdict[i].wrapperPostList:
                print "\tpost\t", fdict[i].wrapperPostList
#####
##### Some MPI types we want to record. If there are pointers to them, deref them.
##### To simplify, assume that the value can be an 'int' for printing and assignment.
#####
def SpecialParamRecord(funct,param):
    """Return 1 when *param* of *funct* is a simple pointer (one level of
    indirection, no array) to one of the MPI handle types we record, or to
    an int; otherwise 0.  Callers may then dereference and print/assign
    the value as if it were an int."""
    global flist
    global fdict
    desc = fdict[funct].paramDict[param]
    # Only single-level pointers that are not arrays qualify.
    if desc.pointerLevel != 1 or desc.arrayLevel != 0:
        return 0
    recordable = ("MPI_Request", "MPI_Comm", "MPI_Datatype",
                  "MPI_Group", "MPI_Info", "int")
    if desc.basetype in recordable:
        return 1
    return 0
#####
##### ParamDictUpdate - updates the datastructures for a function after
##### the basics have been entered. Must be called for each function.
#####
def ParamDictUpdate(fname):
    """Parse the raw C parameter strings of *fname* into VarDesc entries
    and tag message/IO/RMA size parameters.

    Populates fdict[fname].paramDict and .paramConciseList, assigns the
    *CountPname/*TypePname attributes from the messParamDict/ioParamDict/
    rmaParamDict tables, and sets recordIt = 1 for plain scalars and for
    the pointer types accepted by SpecialParamRecord.  Must be called once
    for each parsed function.
    """
    global flist
    global fdict
    global messParamDict
    global ioParamDict
    global rmaParamDict
    for p in fdict[fname].paramList:
        ## check for pointers, arrays
        pname = "NULL"
        basetype = "NULL"
        pointerLevel = string.count(p,"*")
        arrayLevel = string.count(p,"[")
        if (pointerLevel > 0) and (arrayLevel > 0):
            ## handle pointers and arrays: name sits between the last '*'
            ## and the first '['
            pname = p[string.rfind(p,"*")+1:string.find(p,"[")]
            basetype = p[0:string.find(p,"*")]
        elif pointerLevel > 0:
            ## handle pointers
            pname = p[string.rfind(p,"*")+1:len(p)]
            basetype = p[0:string.find(p,"*")]
        elif arrayLevel > 0:
            ## handle arrays
            pname = p[string.find(p," "):string.find(p,"[")]
            basetype = p[0:string.find(p," ")]
        else:
            ## normal hopefully :)
            tokens = string.split(p)
            if len(tokens) == 1:
                ## must be void
                pname = ""
                basetype = "void"
            else:
                pname = string.strip(tokens[1])
                basetype = string.strip(tokens[0])
        pname = string.strip(pname)
        basetype = string.strip(basetype)
        fdict[fname].paramDict[pname] = VarDesc(pname,basetype,pointerLevel,arrayLevel)
        fdict[fname].paramConciseList.append(pname)
        # Identify and assign message size parameters
        # (code 1 = send count, 2 = send type, 3 = recv count, 4 = recv type)
        if messParamDict.has_key((fname,pname)):
            paramMessType = messParamDict[(fname,pname)]
            if paramMessType == 1:
                fdict[fname].sendCountPname = pname
            elif paramMessType == 2:
                fdict[fname].sendTypePname = pname
            elif paramMessType == 3:
                fdict[fname].recvCountPname = pname
            elif paramMessType == 4:
                fdict[fname].recvTypePname = pname
        # Identify and assign io size parameters (1 = count, 2 = type)
        if ioParamDict.has_key((fname,pname)):
            paramMessType = ioParamDict[(fname,pname)]
            if paramMessType == 1:
                fdict[fname].ioCountPname = pname
            elif paramMessType == 2:
                fdict[fname].ioTypePname = pname
        # Identify and assign rma size parameters (1 = count, 2 = type)
        if rmaParamDict.has_key((fname,pname)):
            paramMessType = rmaParamDict[(fname,pname)]
            if paramMessType == 1:
                fdict[fname].rmaCountPname = pname
            elif paramMessType == 2:
                fdict[fname].rmaTypePname = pname
        # Record plain non-void scalars directly; pointers only when
        # SpecialParamRecord accepts their type.
        if (fdict[fname].paramDict[pname].pointerLevel == 0) \
           and (fdict[fname].paramDict[pname].arrayLevel == 0) \
           and (fdict[fname].paramDict[pname].basetype != "void"):
            fdict[fname].paramDict[pname].recordIt = 1
        elif SpecialParamRecord(fname,pname):
            fdict[fname].paramDict[pname].recordIt = 1
        else:
            pass
        if verbose:
            #print "\t->",p
            print "\t",pname, basetype, pointerLevel, arrayLevel
#####
##### Parses the input file and loads the information into the function dict.
#####
def ReadInputFile(f):
    """Parse the wrapper-definition input file *f* into fdict/flist.

    The file interleaves MPI prototypes with optional "directives",
    "wrapper_pre", and "wrapper_post" sections; each section is opened and
    closed by a line beginning with its keyword.  '@' starts a comment
    running to end of line.  After parsing, ids are (re)assigned in sorted
    name order; names in noDefineList do not consume an id.
    """
    # parser states
    p_start = "start"
    p_directives = "directives"
    p_wrapper_pre = "wrapper_pre"
    p_wrapper_post = "wrapper_post"
    parserState = p_start
    global cnt
    global fdict
    global flist
    fcounter = baseID
    print "-----*----- Parsing input file:",f
    while 1:
        ##### read a line from input
        rawline = f.readline()
        if not rawline:
            break
        cnt = cnt + 1
        # Strip '@' comments before tokenizing.
        line = re.sub("\@.*$","",rawline)
        ##### break it into tokens
        tokens = string.split(line)
        if not tokens:
            continue
        ##### determine what type of line this is and then parse it as required
        if (string.find(line,"(") != -1) \
           and (string.find(line,")") != -1) \
           and (string.find(line,"MPI_") != -1) \
           and parserState == p_start:
            ##### we have a prototype start line, e.g. "int MPI_Send( ... )"
            name = tokens[1]
            retype = tokens[0]
            lparen = string.index(line,"(")
            rparen = string.index(line,")")
            paramstr = line[lparen+1:rparen]
            paramList = map(string.strip,string.split(paramstr,","))
            # print cnt, "-->", name, paramList
            fdict[name] = fdecl(name, fcounter, retype, paramList, paramstr, line)
            ParamDictUpdate(name)
            # Subsequent directive/wrapper sections attach to this function.
            lastFunction = name
            if verbose:
                print name
        else:
            ##### DIRECTIVES
            if tokens[0] == "directives" and parserState != p_directives:
                ##### beginning of directives
                parserState = p_directives
            elif tokens[0] == "directives" and parserState == p_directives:
                ##### end of directives
                parserState = p_start
            elif parserState == p_directives:
                ##### must be a directive, process it
                ProcessDirectiveLine(lastFunction, line)
            ##### CODE WRAPPER PRE
            elif tokens[0] == "wrapper_pre" and parserState != p_wrapper_pre:
                ##### beginning of wrapper_pre
                parserState = p_wrapper_pre
            elif tokens[0] == "wrapper_pre" and parserState == p_wrapper_pre:
                ##### end of wrapper_pre
                parserState = p_start
            elif parserState == p_wrapper_pre:
                ##### must be a directive, process it
                ProcessWrapperPreLine(lastFunction, line)
            ##### CODE WRAPPER POST
            elif tokens[0] == "wrapper_post" and parserState != p_wrapper_post:
                ##### beginning of wrapper_post
                parserState = p_wrapper_post
            elif tokens[0] == "wrapper_post" and parserState == p_wrapper_post:
                ##### end of wrapper_post
                parserState = p_start
            elif parserState == p_wrapper_post:
                ##### must be a directive, process it
                ProcessWrapperPostLine(lastFunction, line)
            ##### UNKNOWN
            else:
                print "Unknown input line ",cnt, ":", line,
    # Renumber ids in sorted-name order; noDefineList names keep the
    # current counter without advancing it.
    flist = fdict.keys()
    flist.sort()
    fcounter = baseID
    for f in flist :
        fdict[f].id = fcounter
        if f not in noDefineList:
            fcounter = fcounter + 1
    print "-----*----- Parsing completed: ", len(fdict), " functions found."
###
### create a standard file header and return the init list
###
def StandardFileHeader(fname):
    """Return the list of C comment lines that opens every generated file:
    filename, do-not-edit warning, timestamp, host/OS, and creator.

    Robustness fix: the original read os.environ["LOGNAME"], which raises
    KeyError when LOGNAME is unset (cron jobs, some CI shells); fall back
    to "unknown" instead of crashing the generator.
    """
    olist = []
    olist.append("/* " + fname + " */\n")
    olist.append("/* DO NOT EDIT -- AUTOMATICALLY GENERATED! */\n")
    olist.append("/* Timestamp: " + time.strftime("%d %B %Y %H:%M", time.localtime(time.time())) + " */\n")
    olist.append("/* Location: " + socket.gethostname () + " " + os.name + " */\n")
    # .get avoids a KeyError when the environment lacks LOGNAME.
    olist.append("/* Creator: " + os.environ.get("LOGNAME", "unknown") + " */\n")
    olist.append("\n")
    olist.append("\n")
    return olist
###
### Scan the lists of all functions and look for optimization
### opportunities (in space/speed).
###
### NOT USED
###
def ParameterOptimization():
    """Re-run ParamDictUpdate over every parsed function.

    Marked NOT USED in the surrounding comments; the call site in main()
    is commented out.
    """
    global flist
    global fdict
    ##### visit each function and update each functions parameter dictionary
    for funct in flist:
        if verbose:
            print funct
        ParamDictUpdate(funct)
###
### Create the structure files.
###
def GenerateStructureFile():
    """Write mpiPi_def.h: the mpiPi_BASE base id plus a
    '#define mpiPi_<name> <id>' line for every function not in
    noDefineList."""
    global flist
    global fdict
    print "-----*----- Generating structure files"
    cwd = os.getcwd()
    os.chdir(cwd)  # no-op: chdir to the directory we are already in
    sname = cwd + "/mpiPi_def.h"
    g = open(sname, "w")
    olist = StandardFileHeader(sname)
    olist.append("\n")
    olist.append("#define mpiPi_BASE " + str(baseID) + "\n")
    olist.append("\n")
    for funct in flist:
        if funct not in noDefineList:
            olist.append("#define mpiPi_" + funct + " " + str(fdict[funct].id) + "\n")
    olist.append("\n\n/* eof */\n")
    g.writelines(olist)
    g.close()
###
### Generate a lookup table where mpiP can grab variables and function pointers.
###
def GenerateLookup():
    """Write lookup.c: the mpiPi_lookup[] table mapping each generated
    mpiPi_<name> id to its function-name string, terminated by {0,NULL}."""
    global flist
    global fdict
    print "-----*----- Generating the lookup table"
    cwd = os.getcwd()
    os.chdir(cwd)  # no-op: chdir to the directory we are already in
    sname = cwd + "/lookup.c"
    g = open(sname, "w")
    olist = StandardFileHeader(sname)
    olist.append("#include \"mpiPi.h\"\n")
    olist.append("#include \"mpiPi_def.h\"\n")
    olist.append("\n")
    olist.append("\n")
    olist.append("mpiPi_lookup_t mpiPi_lookup [] = {\n")
    counter = 0
    for funct in flist:
        if funct not in noDefineList:
            # Emit a separating comma before every entry except the first.
            # (counter < len(flist) is always true here; only counter > 0
            # actually gates the comma.)
            if counter < len(flist) \
               and counter > 0 :
                olist.append(",\n")
            olist.append("\t{ mpiPi_" + funct)
            olist.append(", \"" + funct + "\"")
            olist.append("}")
            counter = counter + 1
    olist.append(",\n\t{0,NULL}};\n")
    olist.append("\n")
    olist.append("/* eof */\n")
    g.writelines(olist)
    g.close()
###
### Create a MPI wrapper for one function using the information in the function dict.
### First, generate a generic wrapper, and then the FORTRAN, C wrappers.
###
def CreateWrapper(funct, olist):
    """Append the profiling wrappers for *funct* to *olist*.

    Emits two wrappers: a C wrapper (HPCRUN_MPI_WRAP) that records the
    message-size metric and forwards to the PMPI entry point, and a
    Fortran wrapper (F77_<NAME>) that converts Fortran handles to their C
    equivalents where needed and then calls the C wrapper.  Functions
    flagged nowrapper are skipped.
    """
    global fdict
    global arch
    if fdict[funct].nowrapper:
        return
    if verbose:
        print "Wrapping ",funct
    olist.append("\n\n\n/* --------------- " + funct + " --------------- */\n" )
    #####
    ##### C wrapper
    #####
    olist.append("\n\n" + fdict[funct].returntype + " HPCRUN_MPI_WRAP("
                 + fdict[funct].name + ") (" + fdict[funct].paramStr + "){" )
    if fdict[funct].wrapperPreList:
        olist.extend(fdict[funct].wrapperPreList)
    # If the call carries a payload, record its size: send side preferred,
    # receive side otherwise.
    if ((fdict[funct].sendCountPname != "") or (fdict[funct].recvCountPname != "")):
        buffcount = fdict[funct].sendCountPname
        bufftype = fdict[funct].sendTypePname
        if (fdict[funct].sendCountPname == ""):
            buffcount = fdict[funct].recvCountPname
            bufftype = fdict[funct].recvTypePname
        olist.append( "\n"
                      + "if ( " + fdict[funct].sendTypePname + " != MPI_DATATYPE_NULL ) {\n"
                      + " hpmpi_store_metric(Get_Msg_size(" +buffcount+ ", "+ bufftype + ") );\n"
                      + "} else {\n TMSG(MPI,\"MPI_DATATYPE_NULL encountered. MPI_IN_PLACE not supported.\\n\");\n"
                      + " TMSG(MPI,\"Values for %s may be invalid.\\n\", &(__func__)[7]);\n}\n")
    # Forward to the PMPI profiling entry point with the original args.
    olist.append("\nreturn P" + funct + "( " )
    for i in fdict[funct].paramConciseList:
        if (fdict[funct].paramDict[i].pointerLevel == 0) \
           and (fdict[funct].paramDict[i].arrayLevel == 0) \
           and (fdict[funct].paramDict[i].basetype != "void"):
            olist.append(" " + i)
        elif (fdict[funct].paramDict[i].pointerLevel > 0):
            olist.append(i)
        elif (fdict[funct].paramDict[i].arrayLevel > 0):
            olist.append(i)
        else:
            print "Warning: passing on arg",i,"in",funct
        if fdict[funct].paramConciseList.index(i) < len(fdict[funct].paramConciseList) - 1:
            olist.append(", ")
    olist.append(" );\n\n" )
    olist.append("}" + " /* " + funct + " */\n")
    #####
    ##### Fortran wrapper
    #####
    ##### funct decl
    olist.append("\n\nvoid " + "F77_" + string.upper(funct) + "(" )
    #================================================================================
    # In the case where MPI_Fint and and opaque objects such as MPI_Request are not the same size,
    # we want to use MPI conversion functions.
    #
    # The most obvious problem we have encountered is for MPI_Request objects,
    # but Communicators, Group, Datatype, Op, and File are also possibily problems.
    #
    # There are two cases:
    #  1) A single argument needs to be translated.
    #  2) An array of objects needs to be allocated and translated.
    #     This only appears necessary for Request and Datatype
    #
    # The algorithm for processing Fortran wrapper functions is as follows:
    #  1.  Declare all C variable versions for Fortran arguments.
    #  2.  Allocate any arrays to be used.
    #  3.  Perform any necessary pre-call array and scalar xlation.
    #  4.  Make the function call with appropriate C variables.
    #  5.  Perform any necessary post-call array and scalar xlation.
    #  6.  Free any arrays.
    #================================================================================
    ### Type translation information
    xlateVarName = ""
    xlateVarNames = []
    xlateTypes = []
    xlateCount = 0
    # Input types to translate
    xlateTargetTypes = [ "MPI_Comm", "MPI_Datatype", "MPI_File", "MPI_Group", "MPI_Info", "MPI_Op", "MPI_Request" ]
    freelist = []
    # Iterate through the arguments for this function
    opaqueFound = 0
    for i in fdict[funct].paramConciseList:
        if ( doOpaqueXlate is True and fdict[funct].paramDict[i].basetype in xlateTargetTypes ) :
            # Verify that there is a Dictionary entry for translating this argument
            if ( not ( opaqueInArgDict.has_key((funct, i)) or opaqueOutArgDict.has_key((funct, i)) ) ):
                print "*** Failed to find translation information for " + funct + ":" + i + "\n"
            opaqueFound = 1
            # All Fortran opaque object are of type MPI_Fint
            currBasetype = "MPI_Fint"
            # Store variable name and type
            xlateTypes.append(fdict[funct].paramDict[i].basetype)
            xlateVarNames.append(i)
            # Try to identify whether array or single value by whether "array" is in the variable name
            # and add C declaration to declaration list.
            # NOTE(review): 'decl' is never initialized (or emitted) anywhere
            # in this excerpt, so this += raises NameError whenever an opaque
            # argument is translated -- confirm against the full source.
            if ( xlateVarNames[xlateCount].count("array") > 0 ):
                decl += xlateTypes[xlateCount] + " *c_" + xlateVarNames[xlateCount] + ";\n";
            else:
                decl += xlateTypes[xlateCount] + " c_" + xlateVarNames[xlateCount] + ";\n";
            xlateCount += 1
        else:
            # Not translating this variable
            currBasetype = fdict[funct].paramDict[i].basetype
        # Add argument to function declaration
        olist.append(currBasetype + ' ')
        # Plain scalars gain a '*' (Fortran passes arguments by reference).
        if (fdict[funct].paramDict[i].pointerLevel == 0) \
           and (fdict[funct].paramDict[i].arrayLevel == 0) \
           and (fdict[funct].paramDict[i].basetype != "void"):
            olist.append(" * ")
        if (fdict[funct].paramDict[i].pointerLevel > 0):
            for j in xrange(1,fdict[funct].paramDict[i].pointerLevel+1):
                olist.append(" *")
        olist.append(i)
        if (fdict[funct].paramDict[i].arrayLevel > 0):
            for x in range(0, fdict[funct].paramDict[i].arrayLevel) :
                olist.append('[')
            for x in range(0, fdict[funct].paramDict[i].arrayLevel) :
                olist.append(']')
        else:
            pass
        if fdict[funct].paramConciseList.index(i) < len(fdict[funct].paramConciseList) - 1:
            olist.append(", ")
    # Add ierr argument and declarations to output list
    olist.append(" , MPI_Fint *ierr)")
    olist.append("{")
    olist.append("\n")
    if fdict[funct].wrapperPreList:
        olist.extend(fdict[funct].wrapperPreList)
    if ( 'mips' in arch ) :
        olist.append("void *saved_ret_addr = __builtin_return_address(0);\n")
    else :
        if ( useSetJmp == True ) :
            olist.append("setjmp (jbuf);\n\n")
    # Allocate any arrays used for translation
    for i in range(len(xlateVarNames)) :
        xlateVarName = xlateVarNames[i]
        xlateType = xlateTypes[i]
        # A pretty sketchy way of identifying an array size, but as far as I can tell,
        # only one array is passed as an argument per function.
        if ( fdict[funct].paramConciseList.count("count") > 1 ):
            print "*** Multiple arrays in 1 function!!!!\n";
        if ( "incount" in fdict[funct].paramConciseList ):
            countVar = "incount";
        elif ( "count" in fdict[funct].paramConciseList ):
            countVar = "count";
        else:
            countVar = "max_integers"
        if ( xlateVarName.count("array") > 0 ):
            olist.append("c_" + xlateVarName + " = (" + xlateType + "*)malloc(sizeof(" + xlateType + ")*(*" + countVar + "));\n")
            olist.append("if ( c_" + xlateVarName + " == NULL ) mpiPi_abort(\"Failed to allocate memory in " \
                         + funct + "\");\n")
            freelist.append("c_"+xlateVarName)
    # Generate pre-call translation code if necessary by iterating through arguments that
    # were identified as opaque objects needing translation above
    for i in range(len(xlateVarNames)) :
        # Set current argument name and type
        xlateVarName = xlateVarNames[i]
        xlateType = xlateTypes[i]
        # Check for valid function:argument-name entry for pre-call translation.
        if ( opaqueInArgDict.has_key((funct, xlateVarName)) \
             and opaqueInArgDict[(funct, xlateVarName)] == xlateType ) :
            # Datatype translation is the only call where the translation function
            # doesn't match the argument type.
            if ( xlateType == "MPI_Datatype" ):
                xlateFuncType = "MPI_Type"
            else:
                xlateFuncType = xlateType
            if ( xlateVarName.count("array") > 0 ):
                olist.append("{\n int i; \n")
                olist.append(" for (i = 0; i < *" + countVar + "; i++) { \n")
                olist.append(" c_" + xlateVarName + "[i] = " + xlateFuncType + "_f2c(" + xlateVarName + "[i]);\n")
                olist.append(" }\n}\n")
            else:
                olist.append("c_" + xlateVarName + " = " + xlateFuncType + "_f2c(*" + xlateVarName + ");\n")
            xlateDone = 1
    # Start generating call to C/Fortran common mpiP wrapper function
    olist.append("\nint rc = " + funct + "( " )
    argname = ""
    # Iterate through mpiP wrapper function arguments, replacing argument with C version where appropriate
    for i in fdict[funct].paramConciseList:
        if ( i in xlateVarNames and
             ( opaqueInArgDict.has_key((funct, i)) or opaqueOutArgDict.has_key((funct, i))) ):
            if ( i.count("array") > 0 ):
                argname = "c_" + i;
            else:
                argname = "&c_" + i;
        else:
            argname = i
        # Scalars were received by reference, so dereference them here.
        if (fdict[funct].paramDict[i].pointerLevel == 0) \
           and (fdict[funct].paramDict[i].arrayLevel == 0) \
           and (fdict[funct].paramDict[i].basetype != "void"):
            olist.append("*"+argname)
        elif (fdict[funct].paramDict[i].pointerLevel > 0):
            olist.append(argname)
        elif (fdict[funct].paramDict[i].arrayLevel > 0):
            olist.append(argname)
        else:
            pass
        if fdict[funct].paramConciseList.index(i) < len(fdict[funct].paramConciseList) - 1:
            olist.append(", ")
    olist.append(" );\n")
    olist.append("*ierr = (MPI_Fint)rc;\n")
    # Generate post-call translation code if necessary
    xlateCode = []
    xlateDone = 0
    for i in range(len(xlateVarNames)) :
        xlateVarName = xlateVarNames[i]
        xlateType = xlateTypes[i]
        if ( opaqueOutArgDict.has_key((funct, xlateVarName)) \
             and opaqueOutArgDict[(funct, xlateVarName)] == xlateType ):
            # Datatype translation is the only call where the translation function
            # doesn't match the argument type.
            if ( xlateType == "MPI_Datatype" ):
                xlateFuncType = "MPI_Type"
            else:
                xlateFuncType = xlateType
            # Generate array or scalar translation code
            if ( xlateFortranArrayExceptions.has_key((funct, xlateVarName)) ) :
                xlateCode.append(xlateVarName + "[*" + xlateFortranArrayExceptions[(funct,xlateVarName)] + \
                                 "] = " + xlateFuncType + "_c2f(c_" + xlateVarName + \
                                 "[*" + xlateFortranArrayExceptions[(funct, xlateVarName)] + "]);\n")
            elif ( xlateVarName.count("array") > 0 ):
                xlateCode.append("{\n int i; \n")
                xlateCode.append(" for (i = 0; i < *" + countVar + "; i++) { \n")
                xlateCode.append(" " + xlateVarName + "[i] = " + xlateFuncType + "_c2f(c_" + xlateVarName + "[i]);\n")
                xlateCode.append(" }\n}\n")
            else:
                xlateCode.append("*" + xlateVarName + " = " + xlateFuncType + "_c2f(c_" + xlateVarName + ");\n")
            xlateDone = 1
    # If appropriate, increment any output indices
    if incrementFortranIndexDict.has_key(funct) :
        if incrementFortranIndexDict[funct][1] == 1 :
            xlateCode.append("if ( " + incrementFortranIndexDict[funct][0] + " >= 0 ) (" + incrementFortranIndexDict[funct][0] + ")++;\n")
        else:
            xlateCode.append("{ int i; for ( i = 0; i < " + incrementFortranIndexDict[funct][1] + "; i++) " \
                             + incrementFortranIndexDict[funct][0] + "[i]++;}\n")
    # Only emit the fixups when the underlying call succeeded.
    if xlateDone == 1 :
        olist.append("if ( rc == MPI_SUCCESS ) { \n")
        #print " xlateCode is ", xlateCode
        olist.extend(xlateCode)
        olist.append("}\n")
    # Free allocated arrays
    for freeSym in freelist:
        olist.append("free("+freeSym+");\n")
    olist.append("} /* " + string.lower(funct) + " */\n")
    #if ( opaqueFound == 1 and xlateDone == 0 ):
    #  print "Function " + funct + " not translated!\n"
    print " Wrapped " + funct
def GenerateWrappers():
    """Write mpi-overrides.c: the hpcrun include/macro preamble, the
    Get_Msg_size and hpmpi_store_metric helper functions, then a C and a
    Fortran profiling wrapper for every parsed function (CreateWrapper)."""
    global flist
    global fdict
    global arch
    global doWeakSymbols
    print "-----*----- Generating profiling wrappers"
    cwd = os.getcwd()
    os.chdir(cwd)  # no-op: chdir to the directory we are already in
    sname = cwd + "/mpi-overrides.c"
    g = open(sname, "w")
    olist = StandardFileHeader(sname)
    olist.append("#include <assert.h>\n")
    olist.append("#include <ucontext.h>\n")
    olist.append("#include <stdlib.h>\n")
    olist.append("#include <unistd.h>\n")
    olist.append("#include <mpi.h>\n")
    olist.append("\n/**** local include files****/\n")
    olist.append("#include <messages/messages.h>\n")
    olist.append("#include <safe-sampling.h>\n")
    olist.append("#include <sample_event.h>\n")
    olist.append("#include <monitor-exts/monitor_ext.h>\n")
    olist.append("#include \"symbols.h\"\n")
    olist.append("\n/**** macros ****/\n")
    olist.append("\n#define HPCRUN_MPI_WRAP MONITOR_EXT_WRAP_NAME\n")
    olist.append("\n/**** external functions ****/\n")
    olist.append("\nextern int hpcrun_mpi_metric_id();\n")
    olist.append("\n/**** internal private functions ****/\n")
    # Get_Msg_size: bytes for a (count, datatype) pair via PMPI_Type_size.
    olist.append("\nstatic inline int Get_Msg_size( int count, MPI_Datatype datatype ){\n")
    olist.append(" int dsize;\n PMPI_Type_size( datatype, &dsize );\n return count * dsize;\n}\n");
    # hpmpi_store_metric: record a byte-count sample on the current callpath.
    olist.append("\nstatic void hpmpi_store_metric(size_t bytes){\n");
    olist.append(" ucontext_t uc;\n");
    olist.append(" if (hpcrun_safe_enter()) {\n");
    olist.append(" getcontext(&uc);\n");
    olist.append(" sample_val_t sampleVal = hpcrun_sample_callpath(&uc, hpcrun_mpi_metric_id(), bytes, 0, 1);\n");
    olist.append(" TMSG(MPI, \"sample: %p, bytes: %d\", sampleVal.sample_node, bytes);\n");
    olist.append(" hpcrun_safe_exit();\n");
    olist.append(" }\n");
    olist.append("}\n");
    for funct in flist:
        CreateWrapper(funct, olist)
    olist.append("\n")
    olist.append("\n")
    olist.append("/* eof */\n")
    g.writelines(olist)
    g.close()
def GetFortranSymbol(fsymtp, fsym) :
    """Return the Fortran linker symbol for *fsym* under naming scheme
    *fsymtp*.

    Schemes 'symbol', 'symbol_', 'symbol__' give the lowercase name with
    zero, one, or two trailing underscores; 'SYMBOL', 'SYMBOL_',
    'SYMBOL__' the uppercase equivalents.  An unrecognized scheme yields
    "" (callers such as GenerateSymbolDefs rely on that).

    Fix: uses str.lower()/str.upper() instead of the string-module
    functions (same behavior on Python 2, but string.lower/string.upper
    were removed in Python 3); the if/elif ladder becomes a table lookup.
    """
    suffix = {'symbol': '', 'symbol_': '_', 'symbol__': '__',
              'SYMBOL': '', 'SYMBOL_': '_', 'SYMBOL__': '__'}
    if fsymtp not in suffix:
        return ""
    # Uppercase schemes are exactly the ones spelled in capitals.
    base = fsym.upper() if fsymtp.startswith('S') else fsym.lower()
    return base + suffix[fsymtp]
def GenerateWeakSymbols():
    """Write '#pragma weak' aliases for every alternate Fortran name
    mangling, pointing at the default (f77symbol) spelling.

    Output files: weak-symbols-special.h receives MPI_Init,
    MPI_Init_thread, and MPI_Finalize; weak-symbols-pcontrol.h receives
    any *Pcontrol* function; weak-symbols.h receives every other function
    that has a wrapper.
    """
    global flist
    global f77symbol
    #
    # Generate Weak Symbols
    #
    cwd = os.getcwd()
    os.chdir(cwd)  # no-op: chdir to the directory we are already in
    sname = cwd + "/weak-symbols.h"
    g = open(sname, "w")
    sname = cwd + "/weak-symbols-special.h"
    s = open(sname, "w")
    sname = cwd + "/weak-symbols-pcontrol.h"
    p = open(sname, "w")
    fmlist = ['symbol', 'symbol_', 'symbol__', 'SYMBOL', 'SYMBOL_', 'SYMBOL__' ]
    # Alias every scheme except the one the definitions actually use.
    if f77symbol in fmlist :
        fmlist.remove(f77symbol)
    symflist = copy.deepcopy(flist)
    for funct in symflist:
        dfunc = GetFortranSymbol(f77symbol, funct)
        for mt in fmlist:
            wfunc = GetFortranSymbol(mt, funct)
            if funct in [ 'MPI_Init', 'MPI_Init_thread', 'MPI_Finalize'] :
                s.write("#pragma weak " + wfunc + " = " + dfunc + "\n")
            elif 'Pcontrol' in funct :
                p.write("#pragma weak " + wfunc + " = " + dfunc + "\n")
            elif fdict[funct].nowrapper == 0 :
                g.write("#pragma weak " + wfunc + " = " + dfunc + "\n")
    g.close()
    p.close()
    s.close()
def GenerateSymbolDefs():
    """Write symbols.h: one '#define F77_<NAME> <fortran symbol>' per
    function (plus the two mpipi_get_fortran_* helpers), using the
    mangling scheme selected by the --f77symbol option.

    Consistency fix: the original duplicated GetFortranSymbol's if/elif
    mangling ladder inline (and used the string-module functions removed
    in Python 3).  Reuse GetFortranSymbol; its "" result for an unknown
    scheme falls back to the plain lowercase name, matching the original
    chain's final else branch.
    """
    global flist
    global f77symbol
    cwd = os.getcwd()
    os.chdir(cwd)  # no-op chdir, kept for parity with the other generators
    sname = cwd + "/symbols.h"
    symflist = copy.deepcopy(flist)
    symflist.append('mpipi_get_fortran_argc')
    symflist.append('mpipi_get_fortran_arg')
    g = open(sname, "w")
    for funct in symflist:
        # GetFortranSymbol returns "" for unrecognized schemes; default to
        # the lowercase name in that case (original behavior).
        f77funct = GetFortranSymbol(f77symbol, funct) or funct.lower()
        g.write("#define F77_" + funct.upper() + " " + f77funct + "\n")
    g.close()
def main():
    """Entry point: parse command-line options, read the prototype input
    file (first positional argument, else stdin), then emit the generated
    source and header files."""
    global fdict
    global flist
    global f77symbol
    global doOpaqueXlate
    global arch
    global doWeakSymbols
    global useSetJmp
    opts, pargs = getopt.getopt(sys.argv[1:], '', ['f77symbol=', 'xlate', 'arch=', 'weak', 'usesetjmp'])
    print "MPI Wrapper Generator ($Revision: 442 $)"
    #print "opts=",opts
    #print "pargs=",pargs
    # Defaults, overridden by the options below.
    f77symbol = 'symbol'
    doOpaqueXlate = False
    doWeakSymbols = False
    useSetJmp = False
    arch = 'unknown'
    for o, a in opts:
        print "o: ",o," a: ",a
        if o == '--f77symbol':
            f77symbol = a
        if o == '--xlate':
            doOpaqueXlate = True
        if o == '--weak':
            doWeakSymbols = True
        if o == '--arch':
            arch = a
        if o == '--usesetjmp':
            useSetJmp = True
    ##### Load the input file
    if len(pargs) < 1:
        f = sys.__stdin__
    else:
        f = open(pargs[0])
    ReadInputFile(f)
    # The optimization and structure-file passes are currently disabled.
    print "-----*----- Beginning parameter optimization"
    #ParameterOptimization()
    #GenerateStructureFile()
    GenerateWrappers()
    GenerateSymbolDefs()
    if doWeakSymbols == True :
        GenerateWeakSymbols()
    GenerateLookup()
#####
##### Call main
#####
# Guard the entry point so the module can be imported (e.g. for testing a
# single generator function) without immediately running the whole
# generator; invoking the script directly behaves exactly as before.
if __name__ == "__main__":
    main()
#
#
# <license>
#
# Copyright (c) 2006, The Regents of the University of California.
# Produced at the Lawrence Livermore National Laboratory
# Written by Jeffery Vetter and Christopher Chambreau.
# UCRL-CODE-223450.
# All rights reserved.
#
# This file is part of mpiP. For details, see http://mpip.sourceforge.net/.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the disclaimer below.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the disclaimer (as noted below) in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the UC/LLNL nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF
# THE UNIVERSITY OF CALIFORNIA, THE U.S. DEPARTMENT OF ENERGY OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# Additional BSD Notice
#
# 1. This notice is required to be provided under our contract with the
# U.S. Department of Energy (DOE). This work was produced at the
# University of California, Lawrence Livermore National Laboratory under
# Contract No. W-7405-ENG-48 with the DOE.
#
# 2. Neither the United States Government nor the University of
# California nor any of their employees, makes any warranty, express or
# implied, or assumes any liability or responsibility for the accuracy,
# completeness, or usefulness of any information, apparatus, product, or
# process disclosed, or represents that its use would not infringe
# privately-owned rights.
#
# 3. Also, reference herein to any specific commercial products,
# process, or services by trade name, trademark, manufacturer or
# otherwise does not necessarily constitute or imply its endorsement,
# recommendation, or favoring by the United States Government or the
# University of California. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the United States
# Government or the University of California, and shall not be used for
# advertising or product endorsement purposes.
#
# </license>
#
#
# --- EOF
| zcth428/hpctoolkit111 | src/tool/hpcrun/sample-sources/make-wrappers.py | Python | bsd-3-clause | 48,342 | [
"VisIt"
] | 50d7d78256d0912010a21b3592852a6d752714276f596f2c8bcb5f6f7c6e3f1e |
# Print Class
# - Will Print GroupFinder Events via PDF.
#
# Notes:
# - PDF strings, lines, and rectangles are a nightmare to move.
# - Editing PDF = Time Consuming.
from zope.interface import Interface
from Products.CMFCore.utils import getToolByName
from Products.Five import BrowserView
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from uwosh.librarygroupfinder.browser import util
from uwosh.librarygroupfinder.browser.locations import Locations
from operator import itemgetter, attrgetter
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib import colors
from DateTime import DateTime
import datetime
from xml.dom import minidom
class IPrintMarker(Interface):
    """Marker interface (no attributes or methods); presumably used to
    register the Print browser view in ZCML -- confirm against the
    package's configure.zcml."""
class Print(BrowserView):
    """Browser view that renders GroupFinder events as downloadable PDFs;
    __call__ dispatches between the office, tables, and entrance layouts."""
    DEFAULT_TABLE_LIMIT_SIZE = 21 #how many records to display in tables.
    printableSides = 1 # default one-sided print option
def __call__(self):
#Get Response
response = self.request.response
#Set response header to handle PDF
response.setHeader('Content-Type', 'application/pdf')
#Possible Parameters
option = self.request.form.get('option', None)
sides = self.request.form.get('sides', None)
#Double Sided Printing?
if sides == "2" or sides == "two" or sides == "two-sided":
self.printableSides = 2
#PDF Format
if option == "office":
return self.generateOfficePDF(response)
elif option == "tables":
return self.generateTablesPDF(response)
elif option == "entrance":
return self.generateEntrancePDF(response)
    def generateEntrancePDF(self,response):
        """Render the entrance-display PDF: a large header, up to 13 of
        today's reservations (private groups anonymized), and a
        GroupFinder advertisement footer.  Returns the response with the
        PDF attached."""
        #Attach name.pdf file to responses content disposition
        response['Content-Disposition'] = 'attachment; filename=entrance.pdf'
        #Create empty pdf document, hook pdf with response
        pdf = Canvas(response)
        pdf.setFillColor(colors.black) #sets Line/Rectangle Colors
        pdf.roundRect(10, 755, 575, 75, 10, 1, 0)
        pdf.setFont("Helvetica-Bold", 40)
        pdf.setStrokeColorRGB(0, 0, 0) #sets Line/Rectangle Colors
        pdf.drawCentredString(300, 790, "GroupFinder")
        pdf.setFont("Helvetica-Bold", 20)
        pdf.drawString(15, 765, "The following spaces are reserved during scheduled hours")
        pdf.drawCentredString(300,725, datetime.datetime.now().strftime("%A, %B %d, %Y"))
        #Get Todays Events
        brains = sorted(util.gatherTodaysEvents(self), key=attrgetter('start','Title'))
        index = 700
        i = 0
        for brain in brains:
            pdf.rect(45, index-30, 510, 42, stroke=1, fill=0) #Schedule List Rectangles
            # Hide the titles of non-public groups.
            if util.isPublic(self,brain.id):
                title = brain.Title
            else:
                title = "Private Group"
            pdf.setFont("Helvetica-Bold", 17)
            pdf.drawString(50, index-5, DateTime(brain.start).strftime("%I:%M %p").lower() +
                           " - " + DateTime(brain.end).strftime("%I:%M %p").lower() +
                           " : " + title)
            pdf.setFont("Helvetica", 17)
            # locationLookup resolves a location id to its metadata dict
            # (defined elsewhere in this class -- not in this excerpt).
            l = self.locationLookup(brain.location)
            pdf.drawString(50, index-25, "Location: " + l['Name'] + " - " + l['DirectionsShort'])
            index -= 42
            i += 1
            # Only 13 rows fit on the page; point at the website for the rest.
            if i == 13:
                pdf.setFont("Helvetica", 17)
                pdf.drawCentredString(300, index-5, "See Website For More Study Groups!")
                break
        pdf.setFont("Helvetica-Bold", 28)
        pdf.drawCentredString(300, 90, "Use GroupFinder to Reserve a Study Space.")
        pdf.setFont("Helvetica", 24)
        pdf.drawCentredString(300, 60, "http://www.uwosh.edu/library/groupfinder")
        # tableFooter is defined elsewhere in this class (not in this excerpt).
        pdf = self.tableFooter(pdf)
        pdf.showPage() #next page, finalize last page.
        pdf.save() #save the pdf content
        return response #return response with hooked pdf.
    def generateOfficePDF(self,response):
        """Render the office PDF: a paginated list of today's groups,
        sorted by start time then location, with page numbers and a
        creation timestamp on each page.  Returns the response with the
        PDF attached."""
        #Attach name.pdf file to responses content disposition
        response['Content-Disposition'] = 'attachment; filename=office.pdf'
        #Create empty pdf document, hook pdf with response
        pdf = Canvas(response)
        #Get Todays Events
        brains = sorted(util.gatherTodaysEvents(self), key=attrgetter('location')) #so awesome, sorts on any attribute!
        brains = sorted(brains, key=attrgetter('start')) #even better a secondary sort.
        #Header: Title Information and Settings
        pdf.setFont("Helvetica-Bold", 12)
        pdf.setStrokeColorRGB(0, 0, 0) #sets Line/Rectangle Colors
        #Header Left Title
        if brains != None and len(brains) > 0:
            pdf.drawString(15, 810, DateTime(brains[0].start).strftime("%A, %B %d, %Y") + " Schedule")
        else:
            pdf.drawString(15, 810, "No Groups scheduled for " + datetime.datetime.now().strftime("%A, %B %d, %Y"))
        #Header Right Title
        pdf.drawRightString(575, 810, "GroupFinder")
        #Body: List of Groups and Settings
        index = 792 #Pixel Index, starting at the top of the pdf page
        page = 1 #Page Number
        for brain in brains:
            pdf.setFont("Helvetica", 12)
            pdf.setStrokeColorRGB(0, 0, 0) #sets Line/Rectangle Colors
            pdf.rect(10, index-20, 575, 30, stroke=1, fill=0) #Rectangle around each Group
            pdf.drawString(15, index-3, brain.Title) #Group Description
            # locationLookup resolves a location id to its metadata dict
            # (defined elsewhere in this class -- not in this excerpt).
            l = self.locationLookup(brain.location)
            pdf.drawString(15, index-15, DateTime(brain.start).strftime("%I:%M %p") +
                           " - " + DateTime(brain.end).strftime("%I:%M %p") +
                           " in " + l['Name'])
            index -= 30 #Move Pixel Index downwards
            #Reach Bottom of page? Creates New Page.
            if index < 30:
                pdf.drawString(15, 5, "Page " + str(page))#add page number pages
                pdf.drawCentredString(300, 5, "Created on " + datetime.datetime.now().strftime("%m/%d/%Y at %I:%M %p"))
                page+=1
                index = 792
                pdf.showPage() #next page
        #add page number pages
        pdf.drawString(15, 5, "Page " + str(page))
        #add date PDF was created
        pdf.drawCentredString(300, 5, "Created on " + datetime.datetime.now().strftime("%m/%d/%Y at %I:%M %p"))
        pdf.showPage() #next page, finalize last page.
        pdf.save() #save the pdf content
        return response #return response with hooked pdf.
    def generateTablesPDF(self, response):
        """Render one table-sign page (or two, for double-sided printing)
        per location that has events today, and stream the PDF back via
        ``response``.
        """
        response['Content-Disposition'] = 'attachment; filename=tables.pdf'
        pdf = Canvas(response)
        pdf.setStrokeColorRGB(0, 0, 0)
        brains = sorted(util.gatherTodaysEvents(self), key=attrgetter('location','start'))
        # Organize for Double-Sided Printing...
        # Group consecutive events by location into mainArray (a list of
        # per-location lists).  This relies on the sort above placing equal
        # locations next to each other.
        prev = None
        mainArray = [] #init
        subArray = [] #init
        for brain in brains:
            if prev == brain.location:
                mainArray[len(mainArray)-1].append(brain)
            else:
                subArray = [] #reset list
                subArray.append(brain)
                mainArray.append(subArray)
            prev = brain.location #remember past
        # PDF Pages
        initPage = False
        for sarr in mainArray:
            #for Handles Two-Sided Pages!
            # self.printableSides is presumably 1 or 2 — each iteration
            # repeats the identical page so the sign can be printed duplex.
            for j in range(self.printableSides):
                if initPage:
                    pdf.showPage()
                else:
                    initPage = True
                l = self.locationLookup(sarr[0].location)
                # HEADER FOOTER ------------------------------------------------------------
                pdf = self.tableHeader(pdf,l['Name'],l['Description']) #Setup Header
                pdf = self.tableInformation(pdf) #Setup Information
                pdf = self.tableLink(pdf)
                pdf = self.tableFooter(pdf) #Setup Footer
                index = 695 #reset index for new page
                i = 0 #Reset for new page
                pdf.setFont("Helvetica-Bold", 18)
                pdf.drawCentredString(300,715, datetime.datetime.now().strftime("%A, %B %d, %Y") + " Schedule")
                # BODY ------------------------------------------------------------
                for brain in sarr:
                    #Body: Schedule Side
                    pdf.rect(45, index-10, 510, 20, stroke=1, fill=0) #Schedule List Rectangles
                    pdf.setFont("Helvetica", 17)
                    # Hide titles of non-public groups.
                    if util.isPublic(self,brain.id):
                        title = brain.Title
                    else:
                        title = "Private Group"
                    pdf.drawString(50, index-5, DateTime(brain.start).strftime("%I:%M %p").lower() +
                                    " - " + DateTime(brain.end).strftime("%I:%M %p").lower() +
                                    " : " + title)
                    index -= 20
                    i += 1
                    # Stop listing once the sign is full and point readers
                    # at the desk instead.
                    if i == self.DEFAULT_TABLE_LIMIT_SIZE:
                        pdf.drawCentredString(300, index-5, "More groups are scheduled after last posted time,")
                        pdf.drawCentredString(300, index-24, "please see Checkout and Reserves Desk.")
                        break
        pdf.showPage()
        pdf.save()
        return response
#Table Format Header
def tableHeader(self,pdf,location,description):
pdf.setFont("Helvetica-Bold", 26)
pdf.setStrokeColorRGB(0, 0, 0) #sets Line/Rectangle Colors
pdf.drawCentredString(300, 795, "GroupFinder : " + location)
#pdf.setFont("Helvetica", 14)
#pdf = self.textCenteredWrap(pdf,description)
pdf.setFont("Helvetica-Bold", 20)
pdf.drawCentredString(300, 762, "This space is reserved during scheduled hours")
pdf.setFillColor(colors.black) #sets Line/Rectangle Colors
pdf.roundRect(10, 745, 575, 85, 10, 1, 0)
return pdf
def textCenteredWrap(self,pdf,text):
pdf.setFont("Helvetica", 12)
lines = []
i = 780
if len(text) > 91:
for c in text:
lines.append(text[0:91])
text = text[91:len(text)]
if len(text) == 0:
break
for l in lines:
pdf.drawCentredString(300, i, l)
i -= 12
else:
pdf.drawCentredString(300, i, text)
self.leftOff = i
return pdf
#Table Format Footer
    def tableFooter(self,pdf):
        """Stamp the library watermark image near the bottom center of the page."""
        # The image is served as a Plone ++resource++; drawImage fetches it
        # by URL and places it at (233, 15) scaled to 131x25 points.
        pdf.drawImage(self.context.absolute_url() +
            "/++resource++uwosh.librarygroupfinder.stylesheets/images/watermark.jpg",233,15,width=131,height=25)
        return pdf
def tableLink(self,pdf):
pdf.setFont("Helvetica-Bold", 28)
pdf.drawCentredString(300, 225, "Use GroupFinder To Reserve This Space.")
pdf.setFont("Helvetica", 24)
pdf.drawCentredString(300, 200, "http://www.uwosh.edu/library/groupfinder")
return pdf
#Table Format Information Box
def tableInformation(self,pdf):
pdf.setFont("Helvetica-Bold", 12)
pdf.drawString(45,170, "Information") #Information Title
pdf.setFont("Helvetica", 12)
pdf.roundRect(45, 55, 510, 110, 10, stroke=1, fill=0) #Information Rectangle
#Information 1) 2) 3) etc...
pdf.drawString(50,145, "1) Scheduled study groups have rights to " +
"this area during their scheduled time.")
pdf.drawString(50,125, "2) Study groups created today are not guaranteed a " +
"reservation. To ensure a reservation")
pdf.drawString(50,110, "the group must be scheduled prior to today.") #newline
pdf.drawString(50,90, "3) Students, Tutors and Instructors can schedule a study group.")
pdf.drawString(50,70, "4) Please visit the Checkout and Reserves Desk with any questions.")
return pdf
#Library WaterMark, instead of using image, this mimics the logo.
def waterMark(self,pdf,x,y):
pdf.setFont("Helvetica-Bold", 14)
pdf.drawString(x, y, "Polk")
pdf.setFont("Helvetica", 14)
pdf.drawString(x+30, y, "Library")
return pdf
def locationLookup(self,id):
locations = Locations(self.context,self.request)
location = locations.getLocationByUniqueId(id)
if location != None:
return location
return None
| uwosh/uwosh.librarygroupfinder | uwosh/librarygroupfinder/browser/print.py | Python | gpl-2.0 | 13,673 | [
"VisIt"
] | 9c9babb23d9b7c865acdd88774869d92f27dc63faab4e630d041920611783072 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import calendar
from datetime import timedelta ,datetime
from time import strptime
from glob import glob
import re
import string
import os
from ocw.dataset import Dataset
import ocw.utils as utils
import netCDF4
import numpy
import numpy.ma as ma
LAT_NAMES = ['x', 'rlat', 'rlats', 'lat', 'lats', 'latitude', 'latitudes']
LON_NAMES = ['y', 'rlon', 'rlons', 'lon', 'lons', 'longitude', 'longitudes']
TIME_NAMES = ['time', 'times', 'date', 'dates', 'julian']
def _get_netcdf_variable_name(valid_var_names, netcdf, netcdf_var):
    ''' Determine if one of a set of variable names are in a NetCDF Dataset.
    Looks for an occurrence of a valid_var_name in the NetCDF variable data.
    This is useful for automatically determining the names of the lat, lon,
    and time variable names inside of a dataset object.
    :param valid_var_names: The possible variable names to search for in
        the netCDF object.
    :type valid_var_names: List of Strings
    :param netcdf: The netCDF Dataset object in which to check for
        valid_var_names.
    :type netcdf: netcdf4.Dataset
    :param netcdf_var: The relevant variable name to search over in the
        netcdf object. This is used to narrow down the search for valid
        variable names by first checking the desired variable's dimension
        values for one or more of the valid variable names.
    :returns: The variable from valid_var_names that it locates in
        the netCDF object.
    :raises ValueError: When unable to locate a single matching variable
        name in the NetCDF Dataset from the supplied list of valid variable
        names.
    '''
    # NOTE(review): the .encode() calls below assume Python 2 string
    # semantics (str.encode() -> str); under Python 3 they would produce
    # bytes and the comparisons against str names would fail — confirm the
    # targeted interpreter before porting.
    # Strategy 1: check for valid variable names in the target variable's
    # dimension names, then find the 1-D coordinate variable that spans
    # the matched dimension.
    # Check for valid variable names in netCDF variable dimensions
    dimensions = netcdf.variables[netcdf_var].dimensions
    dims_lower = [dim.encode().lower() for dim in dimensions]
    intersect = set(valid_var_names).intersection(dims_lower)
    if len(intersect) == 1:
        # Retrieve the name of the dimension where we found the matching
        # variable name
        index = dims_lower.index(intersect.pop())
        dimension_name = dimensions[index].encode()
        # Locate all of the variables that share the dimension that we matched
        # earlier. If the dimension's name matches then that variable is
        # potentially what we want to return to the user.
        possible_vars = []
        for var in netcdf.variables.keys():
            var_dimensions = netcdf.variables[var].dimensions
            # Skip any dimensions are > 1D
            if len(var_dimensions) != 1:
                continue
            if var_dimensions[0].encode() == dimension_name:
                possible_vars.append(var)
        # If there are multiple variables with matching dimension names then we
        # aren't able to determining the correct variable name using the
        # variable dimensions. We need to try a different approach. Otherwise,
        # we're done!
        if len(possible_vars) == 1:
            return possible_vars[0]
    # Strategy 2: fall back to matching against the dataset's variable
    # names directly.
    # Check for valid variable names in netCDF variable names
    variables = netcdf.variables.keys()
    vars_lower = [var.encode().lower() for var in variables]
    intersect = set(valid_var_names).intersection(vars_lower)
    if len(intersect) == 1:
        index = vars_lower.index(intersect.pop())
        return variables[index]
    # If we couldn't locate a single matching valid variable then we're unable
    # to automatically determine the variable names for the user.
    error = (
        "Unable to locate a single matching variable name from the "
        "supplied list of valid variable names. "
    )
    raise ValueError(error)
def load_WRF_2d_files(file_path,
                      filename_pattern,
                      variable_name,
                      name=''):
    ''' Load multiple WRF (or nuWRF) original output files containing 2D fields such as precipitation and surface variables into a Dataset.
    The dataset can be spatially subset.
    :param file_path: Directory to the NetCDF file to load.
    :type file_path: :mod:`string`
    :param filename_pattern: Path to the NetCDF file to load.
    :type filename_pattern: :list:`string`
    :param variable_name: The variable name to load from the NetCDF file.
    :type variable_name: :mod:`string`
    :param name: (Optional) A name for the loaded dataset.
    :type name: :mod:`string`
    :returns: An OCW Dataset object with the requested variable's data from
        the NetCDF file.
    :rtype: :class:`dataset.Dataset`
    :raises ValueError:
    '''
    WRF_files = []
    for pattern in filename_pattern:
        WRF_files.extend(glob(file_path + pattern))
    WRF_files.sort()
    # Latitude/longitude grids are taken from the first file's first timestep.
    file_object_first = netCDF4.Dataset(WRF_files[0])
    lats = file_object_first.variables['XLAT'][0,:]
    lons = file_object_first.variables['XLONG'][0,:]
    file_object_first.close()   # bug fix: this handle was never closed
    times = []
    values = None
    for ifile, file in enumerate(WRF_files):
        file_object = netCDF4.Dataset(file)
        # The trailing 19 characters of a WRF output filename encode the
        # file's first timestep, e.g. "2008-07-01_00:00:00"; each file is
        # assumed to hold 24 hourly records — TODO confirm for nuWRF output.
        time_struct_parsed = strptime(file[-19:],"%Y-%m-%d_%H:%M:%S")
        for ihour in numpy.arange(24):
            times.append(datetime(*time_struct_parsed[:6]) + timedelta(hours=ihour))
        # Read the variable ONCE per file (the original read it twice and
        # discarded the first copy) and append it along the time axis.
        file_values = file_object.variables[variable_name][:]
        if values is None:
            values = file_values
        else:
            values = numpy.concatenate((values, file_values))
        file_object.close()
    times = numpy.array(times)
    return Dataset(lats, lons, times, values, variable_name, name=name)
def load_file(file_path,
              variable_name,
              variable_unit = None,
              elevation_index=0,
              name='',
              lat_name=None,
              lon_name=None,
              time_name=None):
    ''' Load a NetCDF file into a Dataset.
    :param file_path: Path to the NetCDF file to load.
    :type file_path: :mod:`string`
    :param variable_name: The variable name to load from the NetCDF file.
    :type variable_name: :mod:`string`
    :param variable_unit: (Optional) The unit to record for the variable.
        When omitted, the unit is read from the variable's ``units``
        attribute in the file.
    :type variable_unit: :mod:`string`
    :param elevation_index: (Optional) The elevation index for which data should
        be returned. Climate data is often times 4 dimensional data. Some
        datasets will have readins at different height/elevation levels. OCW
        expects 3D data so a single layer needs to be stripped out when loading.
        By default, the first elevation layer is used. If desired you may
        specify the elevation value to use.
    :type elevation_index: :class:`int`
    :param name: (Optional) A name for the loaded dataset.
    :type name: :mod:`string`
    :param lat_name: (Optional) The latitude variable name to extract from the
        dataset.
    :type lat_name: :mod:`string`
    :param lon_name: (Optional) The longitude variable name to extract from the
        dataset.
    :type lon_name: :mod:`string`
    :param time_name: (Optional) The time variable name to extract from the
        dataset.
    :type time_name: :mod:`string`
    :returns: An OCW Dataset object with the requested variable's data from
        the NetCDF file.
    :rtype: :class:`dataset.Dataset`
    :raises ValueError: When the specified file path cannot be loaded by netCDF4
        or when the lat/lon/time variable name cannot be determined
        automatically.
    '''
    try:
        netcdf = netCDF4.Dataset(file_path, mode='r')
    except RuntimeError:
        err = "Dataset filepath is invalid. Please ensure it is correct."
        raise ValueError(err)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # still propagate.
        err = (
            "The given file cannot be loaded. Please ensure that it is a valid "
            "NetCDF file. If problems persist, report them to the project's "
            "mailing list."
        )
        raise ValueError(err)
    # Auto-detect any coordinate variable names the caller did not supply.
    if not lat_name:
        lat_name = _get_netcdf_variable_name(LAT_NAMES, netcdf, variable_name)
    if not lon_name:
        lon_name = _get_netcdf_variable_name(LON_NAMES, netcdf, variable_name)
    if not time_name:
        time_name = _get_netcdf_variable_name(TIME_NAMES, netcdf, variable_name)
    lats = netcdf.variables[lat_name][:]
    lons = netcdf.variables[lon_name][:]
    times = numpy.array(utils.decode_time_values(netcdf, time_name))
    values = ma.array(netcdf.variables[variable_name][:])
    # Bug fix: honor a caller-supplied unit; the original unconditionally
    # overwrote the ``variable_unit`` parameter with the file metadata.
    if variable_unit is None:
        variable_unit = netcdf.variables[variable_name].units
    # If the values are 4D then we need to strip out the elevation index
    if len(values.shape) == 4:
        # Determine the set of possible elevation dimension names excluding
        # the list of names that are used for the lat, lon, and time values.
        dims = netcdf.variables[variable_name].dimensions
        dimension_names = [dim_name.encode() for dim_name in dims]
        lat_lon_time_var_names = [lat_name, lon_name, time_name]
        elev_names = set(dimension_names) - set(lat_lon_time_var_names)
        # Grab the index value for the elevation values
        level_index = dimension_names.index(elev_names.pop())
        # Strip out the elevation values so we're left with a 3D array.
        if level_index == 0:
            values = values[elevation_index, :, :, :]
        elif level_index == 1:
            values = values[:, elevation_index, :, :]
        elif level_index == 2:
            values = values[:, :, elevation_index, :]
        else:
            values = values[:, :, :, elevation_index]
    # Record provenance so the dataset can be traced back to its source file.
    origin = {
        'source': 'local',
        'path': file_path,
        'lat_name': lat_name,
        'lon_name': lon_name,
        'time_name': time_name
    }
    if elevation_index != 0:
        origin['elevation_index'] = elevation_index
    return Dataset(lats, lons, times, values, variable=variable_name,
                   units=variable_unit, name=name, origin=origin)
def load_multiple_files(file_path,
                        variable_name,
                        dataset_name='model',
                        variable_unit=None,
                        lat_name=None,
                        lon_name=None,
                        time_name=None):
    ''' Load every NetCDF file matching a glob pattern into OCW datasets.
    :param file_path: directory name and common file name patterns where the NetCDF files to load are stored.
    :type file_path: :mod:`string`
    :param variable_name: The variable name to load from each NetCDF file.
    :type variable_name: :mod:`string`
    :param dataset_name: name used for the dataset when only one file matches.
    :type dataset_name: :mod:`string`
    :param variable_unit: (Optional) unit forwarded to :func:`load_file`.
    :type variable_unit: :mod:`string`
    :param lat_name: (Optional) latitude variable name forwarded to :func:`load_file`.
    :type lat_name: :mod:`string`
    :param lon_name: (Optional) longitude variable name forwarded to :func:`load_file`.
    :type lon_name: :mod:`string`
    :param time_name: (Optional) time variable name forwarded to :func:`load_file`.
    :type time_name: :mod:`string`
    :returns: A list of OCW Dataset objects and a parallel list of dataset names.
    :rtype: :class:`list`
    '''
    data_filenames = sorted(glob(file_path))
    if len(data_filenames) == 1:
        # A single file simply takes the caller-supplied dataset name.
        data_name = [dataset_name]
    else:
        # Derive a short name for each file by stripping the longest common
        # prefix and the longest common suffix shared by all matched names.
        prefix = os.path.commonprefix(data_filenames)
        reversed_names = [fname[::-1] for fname in data_filenames]
        postfix = os.path.commonprefix(reversed_names)[::-1]
        data_name = [fname.replace(prefix, '').replace(postfix, '')
                     for fname in data_filenames]
    datasets = [load_file(fname, variable_name, variable_unit,
                          name=label,
                          lat_name=lat_name, lon_name=lon_name,
                          time_name=time_name)
                for fname, label in zip(data_filenames, data_name)]
    return datasets, data_name
| pwcberry/climate | ocw/data_source/local.py | Python | apache-2.0 | 13,413 | [
"NetCDF"
] | 0d35d6efc97af67d8dbaed6f45cacb0f42fe74fa766ae652f05d88a7664b8472 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
test_parallel.py
================
Trying to task farming parameter exploration
99% inspired by parameter_search_example.py from Michael Schmuker
To run this script, you first have to invoke an IPython controller and
computation engines. If IPython is installed correctly and with parallel
computation support, you can just type:
> ipcluster -n 2 &
This will start two computation engines and a controller in the background.
When the controller is up, run this script:
> python parameter_search_example.py
Calculation will start, and after a few seconds (depending on your hardware)
it will save a png graphics file that illustrates the firing rate of a neuron
as a function of the input rate and the weight of the synapse to your current
directory.
Laurent Perrinet, INCM, CNRS
$ Id $
"""
def model_network(param_dict):
    """
    Build a retina model, run it for the given parameters, and return the
    resulting ON-channel firing rate.

    Parameters:
        param_dict - dictionary merged into the retina's default parameters;
            must contain at least:
                N - retina size (passed to the Retina constructor)
                snr - signal-to-noise ratio (echoed back in the result)
                kernelseed - RNG seed (echoed back in the result)

    Returns:
        dictionary with keys:
            snr - the snr value from param_dict
            kernelseed - the kernelseed value from param_dict
            neuron_rate - mean firing rate of the 'out_ON_DATA' recording
    """
    #set up the network
    from retina import Retina
    retina = Retina(param_dict['N'])
    params = retina.params
    params.update(param_dict) # updates what changed in the dictionary
    # simulate the experiment and get its data
    data = retina.run(params)#,verbose=False)
    neuron_rate = data['out_ON_DATA'].mean_rate()
    print neuron_rate
    # return everything, including the input parameters
    return {'snr':param_dict['snr'],
            'kernelseed':param_dict['kernelseed'],
            'neuron_rate': neuron_rate}
def make_param_dict_list(N, N_snr=5, N_seeds=10):
    """
    Create the list of parameter dictionaries for the model network.

    Builds the cross-product of ``N_snr`` snr values (linearly spaced on
    [0.1, 2.0]) and ``N_seeds`` consecutive RNG seeds (12345, 12346, ...).
    The grid sizes were hard-coded in the original; they are now keyword
    parameters whose defaults preserve the old behavior.

    Parameters:
        N - retina size, copied verbatim into every dictionary
        N_snr - number of snr values to scan (default 5)
        N_seeds - number of kernel seeds per snr value (default 10)

    Returns:
        list of plain dicts, one per (snr, kernelseed) combination
    """
    from NeuroTools.parameters import ParameterSpace, ParameterRange
    import numpy
    params = ParameterSpace({
        'N' : N,
        'snr' : ParameterRange(list(numpy.linspace(0.1,2.0,N_snr))),
        'kernelseed' : ParameterRange(list([12345+ k for k in range(N_seeds)]))})
    dictlist = [p.as_dict() for p in params.iter_inner()]
    return dictlist
def show_results(result):
"""
visualizes the result of the parameter search.
Parameters:
result - list of result dictionaries.
"""
import numpy
t_smooth = 100. # ms. integration time to show fiber activity
snrs = numpy.sort([r['snr'] for r in result])
neuron_rates = numpy.zeros(len(snr))
for snr_i in range(len(snrs)):
neuron_rates[r_i] = [r['neuron_rate'] for r in result
if (r['source_rate'] == snrs[snr_i])][0]
import NeuroTools.plotting as plotting
pylab = plotting.get_display(True)
pylab.rcParams.update(plotting.pylab_params())
print rates, neuron_rates
subplot = pylab.imshow( neuron_rates,
interpolation = 'nearest',
origin = 'lower')
plotting.set_labels(subplot.get_axes(),
xlabel = 'rate',
ylabel = 'weight')
pylab.colorbar()
# could add fancy xticks and yticks here
import tempfile, os
(fd, figfilename) = tempfile.mkstemp(prefix = 'parameter_search_result',
suffix = '.png',
dir = os.getcwd())
pylab.gcf().savefig(figfilename)
def run_it(N):
    """
    Run the parameter search over the (snr, kernelseed) grid on the
    IPython engines and return the harvested results.

    Parameters:
        N - retina size, forwarded to make_param_dict_list
    Returns:
        list of result dictionaries produced by model_network
    """
    import sys
    sys.path.append('../parameter_search/')
    import parameter_search as ps
    # search the parameter space around the operating point
    param_dict_list = make_param_dict_list(N)
    srchr = ps.IPythonParameterSearcher(
        dictlist = param_dict_list,
        func = model_network)
    srchr.search()
    outlist = srchr.harvest()
    #return the results
    return outlist
if __name__ == '__main__':
    # Entry point: sweep a 100-pixel retina and plot the harvested rates.
    results = run_it(N=100)
    show_results(results)
| NeuralEnsemble/NeuroTools | examples/retina/test_parallel.py | Python | gpl-2.0 | 4,502 | [
"NEURON"
] | 706d8896f407213c429b27b2c4cbe97db67173b3daaea2ddea238e7a5ec8b9c2 |
import pybullet as p
import time
#you can visualize the timings using Google Chrome, visit about://tracing
#and load the json file
p.connect(p.GUI)
t = time.time() + 3.1
# Record pybullet's profile timings into a Chrome-tracing JSON file.
logId = p.startStateLogging(p.STATE_LOGGING_PROFILE_TIMINGS, "chrome_about_tracing.json")
# Step the simulation for ~3.1 wall-clock seconds, emitting nested profile
# markers each step.  submitProfileTiming(name) opens a marker;
# submitProfileTiming() with no argument closes the most recent one.
while (time.time() < t):
  p.stepSimulation()
  p.submitProfileTiming("pythontest")
  time.sleep(1./240.)
  p.submitProfileTiming("nested")
  for i in range (100):
    p.submitProfileTiming("deep_nested")
    p.submitProfileTiming()
  time.sleep(1./1000.)
  p.submitProfileTiming()
  p.submitProfileTiming()
p.stopStateLogging(logId)
| MadManRises/Madgine | shared/bullet3-2.89/examples/pybullet/examples/profileTiming.py | Python | mit | 598 | [
"VisIt"
] | b74b676726b7032e3e6ee754c7f7f6ad3f833d1fff0786718d981be62858ba00 |
#!/usr/bin/env python
# Purpose: Demonstrates different plots from the matplotlib examples collection
# Author: Ken McIvor <mcivor@iit.edu>, deriving from the matplotlib examples
# collection
#
# Copyright 2002-2004 John D. Hunter, 2005 Illinois Institute of Technology
#
# Distributed under the license agreement for matplotlib 0.72.
#
# For information on the usage and redistribution of this file, and for a
# DISCLAIMER OF ALL WARRANTIES, see the file "LICENSE" that ships with the
# matplotlib 0.72 or http://matplotlib.sourceforge.net/license.html
__version__ = '1.0'
import wx
import wxmplitf
import matplotlib
import matplotlib.cm as cm
from pylab import array, arange, sin, cos, exp, pi, randn, normpdf, meshgrid, \
convolve
def plot_simple(fig):
    """Sine and cosine traces on a single grid-enabled axes."""
    xs = arange(0.0, 2.0, 0.01)
    axes = fig.gca()
    axes.plot(xs, sin(2*pi*xs), linewidth=1.0)
    axes.plot(xs, cos(2*pi*xs), linewidth=1.0)
    axes.set_xlabel('time (s)')
    axes.set_ylabel('voltage (mV)')
    axes.set_title('About as simple as it gets, folks')
    axes.grid(True)
def plot_subplot(fig):
    """Two stacked subplots: damped cosine (top) and pure cosine (bottom)."""
    def damped(t):
        return cos(2*pi*t) * exp(-t)

    coarse = arange(0.0, 5.0, 0.10)
    fine = arange(0.0, 5.0, 0.02)

    top = fig.add_subplot(2, 1, 1)
    top.plot(coarse, damped(coarse), 'bo')
    top.plot(fine, damped(fine), 'k')
    top.grid(True)
    top.set_title('A Tale of 2 Subplots')
    top.set_ylabel('Damped oscillation')

    bottom = fig.add_subplot(2, 1, 2)
    bottom.plot(fine, cos(2*pi*fine), 'r>')
    bottom.grid(True)
    bottom.set_xlabel('time (s)')
    bottom.set_ylabel('Undamped')
def plot_subplot_sharex(fig):
    """Two subplots sharing an x axis; the top axis hides its tick labels."""
    def damped(t):
        return cos(2*pi*t) * exp(-t)

    coarse = arange(0.0, 5.0, 0.10)
    fine = arange(0.0, 5.0, 0.02)

    top = fig.add_subplot(2, 1, 1)
    top.plot(coarse, damped(coarse), 'bo')
    top.plot(fine, damped(fine), 'k')
    top.grid(True)
    top.set_title('Two Subplots Sharing an Axis')
    top.set_ylabel('Damped oscillation')
    # The bottom plot supplies the shared x tick labels, so hide the top's.
    for ticklabel in top.get_xticklabels():
        ticklabel.set_visible(False)

    bottom = fig.add_subplot(2, 1, 2, sharex=top)
    bottom.plot(fine, cos(2*pi*fine), 'r>')
    bottom.grid(True)
    bottom.set_xlabel('time (s)')
    bottom.set_ylabel('Undamped')
def plot_histogram(fig):
    """Normalized histogram of IQ-like samples with the analytic normal overlaid."""
    mu, sigma = 100, 15
    samples = mu + sigma*randn(10000)
    axes = fig.gca()
    # normalized histogram of the data
    n, bins, patches = axes.hist(samples, 100, normed=1)
    # overlay the matching normal density, evaluated at the bin edges
    axes.plot(bins, normpdf(bins, mu, sigma), 'r--', linewidth=2)
    axes.set_xlim((40, 160))
    axes.set_xlabel('Smarts')
    axes.set_ylabel('P')
    axes.set_title('IQ: mu=100, sigma=15')
def plot_fill(fig):
    """A filled red polygon under a decaying sine wave."""
    t = arange(0.0, 1.01, 0.01)
    envelope = sin(2*2*pi*t) * exp(-5*t)
    axes = fig.gca()
    axes.fill(t, envelope, 'r')
    axes.grid(True)
def plot_log(fig):
    """semilogx demo (top) and loglog with base-4 y axis (bottom)."""
    dt = 0.01
    t = arange(dt, 20.0, dt)

    upper = fig.add_subplot(2, 1, 1)
    upper.semilogx(t, sin(2*pi*t))
    upper.set_ylabel('semilogx')
    upper.grid(True)

    lower = fig.add_subplot(2, 1, 2)
    lower.loglog(t, 20*exp(-t/10.0), basey=4)
    lower.xaxis.grid(True, which='minor')  # minor grid on too
    lower.set_xlabel('time (s)')
    lower.set_ylabel('loglog')
    lower.grid(True)
def plot_polar(fig):
    """Polar spiral on a radar-green grid.

    Temporarily overrides the grid rc settings and restores the defaults
    afterwards so later plots are unaffected.  (The original also did an
    unused ``import pylab``, removed here.)
    """
    r = arange(0, 1, 0.001)
    theta = 2*2*pi*r
    # radar green, solid grid lines (temporary rc override)
    matplotlib.rc('grid', color='#316931', linewidth=1, linestyle='-')
    ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True, axisbg='#d5de9c')
    ax.plot(theta, r, color='#ee8d18', lw=3)
    ax.set_title("And there was much rejoicing!", fontsize=14)
    # undo the rc override
    matplotlib.rcdefaults()
def plot_polar_subplot(fig):
    """Polar spiral (left) next to a damped-cosine subplot (right).

    (The original also did an unused ``import pylab``, removed here.)
    """
    #
    # Polar demo
    #
    r = arange(0, 1, 0.001)
    theta = 2*2*pi*r
    # radar green, solid grid lines (temporary rc override)
    matplotlib.rc('grid', color='#316931', linewidth=1, linestyle='-')
    ax = fig.add_subplot(1, 2, 1, polar=True, axisbg='#d5de9c')
    ax.plot(theta, r, color='#ee8d18', lw=3)
    ax.set_title("And there was much rejoicing!", fontsize=14)
    matplotlib.rcdefaults()
    #
    # First part of the subplot demo
    #
    def f(t):
        return cos(2*pi*t) * exp(-t)
    t1 = arange(0.0, 5.0, 0.10)
    t2 = arange(0.0, 5.0, 0.02)
    A1 = fig.add_subplot(1, 2, 2)
    A1.plot(t1, f(t1), 'bo')
    A1.plot(t2, f(t2), 'k')
    A1.grid(True)
    A1.set_title('A tale of one subplot')
    A1.set_ylabel('Damped oscillation', fontsize=10)
    A1.set_xlabel('time (s)', fontsize=10)
def plot_legend(fig):
    """Three labelled curves with a pickable, semi-transparent legend.

    Returns a dict mapping each legend-proxy line to its plotted line so a
    caller can toggle curve visibility when a legend entry is picked.
    """
    a = arange(0, 3, .02)
    c = exp(a)
    d = array(c.tolist()[::-1])

    axes = fig.gca()
    line1, = axes.plot(a, c, 'k--', label='Model length')
    line2, = axes.plot(a, d, 'k:', label='Data length')
    line3, = axes.plot(a, c+d, 'k', label='Total message length')

    # Customized, semi-transparent legend.
    legend = axes.legend(loc='upper center', shadow=True)
    legend.get_frame().set_alpha(0.4)

    # Map legend line -> plotted line, enabling picking on the proxies.
    lined = {}
    for legline, origline in zip(legend.get_lines(), (line1, line2, line3)):
        legline.set_picker(5)  # 5 pts tolerance
        lined[legline] = origline

    axes.set_ylim([-1, 20])
    axes.grid(False)
    axes.set_xlabel('Model complexity --->')
    axes.set_ylabel('Message length --->')
    axes.set_title('Minimum Message Length')
    axes.set_xticklabels([])
    axes.set_yticklabels([])
    return lined
def plot_image(fig):
    """Pseudocolor image of a polynomial damped by a Gaussian envelope."""
    def func3(x, y):
        return (1 - x/2 + x**5 + y**3)*exp(-x**2 - y**2)

    step = 0.025
    xs = arange(-3.0, 3.0, step)
    ys = arange(-3.0, 3.0, step)
    X, Y = meshgrid(xs, ys)
    fig.gca().imshow(func3(X, Y), cmap=cm.jet, extent=(-3, 3, -3, 3))
def plot_layered_images(fig):
    """A chessboard image with a semi-transparent function image on top."""
    def func3(x, y):
        return (1 - x/2 + x**5 + y**3)*exp(-x**2 - y**2)

    # make these smaller to increase the resolution
    step = 0.05
    xs = arange(-3.0, 3.0, step)
    ys = arange(-3.0, 3.0, step)
    X, Y = meshgrid(xs, ys)

    # Layered images must share one extent (the common coordinate system),
    # even though their shapes may differ.
    extent = (min(xs), max(xs), min(ys), max(ys))

    chessboard = array(([0, 1]*4 + [1, 0]*4)*4)
    chessboard.shape = 8, 8

    axes = fig.gca()
    axes.imshow(chessboard, cmap=cm.gray, interpolation='nearest',
                extent=extent)
    axes.hold(True)
    axes.imshow(func3(X, Y), cmap=cm.jet, alpha=.9, interpolation='bilinear',
                extent=extent)
def plot_axes(fig):
    """A colored-noise plot with two small inset axes placed over it."""
    # create some data to use for the plot
    dt = 0.001
    t = arange(0.0, 10.0, dt)
    impulse = exp(-t[:1000]/0.05)                        # impulse response
    white = randn(len(t))
    colored = convolve(white, impulse, mode=2)[:len(white)]*dt   # colored noise

    # the main axes is subplot(111) by default
    axes = fig.gca()
    axes.plot(t, colored)
    axes.set_xlim((0, 1))
    axes.set_ylim((1.1*min(colored), 2*max(colored)))
    axes.set_xlabel('time (s)')
    axes.set_ylabel('current (nA)')
    axes.set_title('Gaussian colored noise')

    # inset: amplitude histogram (upper right)
    inset = fig.add_axes([.65, .6, .2, .2], axisbg='y')
    inset.hist(colored, 400, normed=1)
    inset.set_title('Probability')
    inset.set_xticks([])
    inset.set_yticks([])

    # inset: impulse response (upper left)
    inset = fig.add_axes([.2, .6, .2, .2], axisbg='y')
    inset.plot(t[:len(impulse)], impulse)
    inset.set_title('Impulse response')
    inset.set_xlim((0, 0.2))
    inset.set_xticks([])
    inset.set_yticks([])
#
# Demo Infrastructure
#
class Demo:
    """One demo entry: a title plus the function that draws it.

    Knows how to open a PlotFrame for itself and how to build a wx.Button
    that launches it.
    """

    def __init__(self, title, plotFunction, size=(6.0, 3.7), dpi=96):
        self.title = title
        self.plotFunction = plotFunction
        self.size = size
        self.dpi = dpi

    def run(self):
        """Open a PlotFrame, draw this demo into it, and show it."""
        frame = wxmplitf.PlotFrame(None, -1, self.title, lock = None,
                                   dpi=self.dpi)
        # Some plot functions return a legend->line mapping for picking;
        # stash it on the panel when present.
        mapping = self.plotFunction(frame.get_figure())
        if mapping is not None:
            frame.panel.lined = mapping
        frame.draw()
        frame.Show()

    def makeButton(self, parent):
        """Create a button labelled with this demo's title that runs it."""
        button = wx.Button(parent, -1, self.title)
        wx.EVT_BUTTON(button, button.GetId(), self.OnButton)
        return button

    def OnButton(self, evt):
        self.run()
# Registry of all available demos: button label, plot function, and an
# optional figure size override.
DEMONSTRATIONS = [
    Demo('Simple Plot', plot_simple),
    Demo('Subplots', plot_subplot),
    Demo('Shared X Axes', plot_subplot_sharex),
    Demo('Histogram Plot', plot_histogram),
    Demo('Filled Polygons', plot_fill),
    Demo('Logarithmic Scaling', plot_log),
    Demo('Polar Plot', plot_polar, (6.0, 6.0)),
    Demo('Polar and Linear Subplots', plot_polar_subplot, (8.0,4.0)),
    Demo('Linear Plot with a Legend', plot_legend),
    Demo('Pseudocolor Image', plot_image),
    Demo('Layered Images', plot_layered_images),
    Demo('Overlapping Axes', plot_axes)
]
class TestFrame(wx.Frame):
    """Launcher window: one vertically stacked button per demonstration."""

    def __init__(self, parent, id, title, **kwds):
        wx.Frame.__init__(self, parent, id, title, **kwds)
        sizer = wx.BoxSizer(wx.VERTICAL)
        for demo in DEMONSTRATIONS:
            sizer.Add(demo.makeButton(self), 0, wx.EXPAND|wx.ALL, 5)
        self.SetSizer(sizer)
        self.Fit()
        wx.EVT_WINDOW_DESTROY(self, self.OnWindowDestroy)

    def OnWindowDestroy(self, evt):
        # Closing the launcher ends the whole application.
        wx.GetApp().ExitMainLoop()
def main():
    """Create the wx application and launcher frame, then enter the main loop."""
    app = wx.PySimpleApp()
    launcher = TestFrame(None, -1, 'WxMpl Demos')
    launcher.Show(True)
    app.MainLoop()


if __name__ == '__main__':
    main()
| digitie/magneto | native/wxmplplot/plotting.py | Python | unlicense | 9,658 | [
"Gaussian"
] | 118b26347cf32c7cf37924ca8cc9c3af7b29f7423133f0a5a8593b215382d46f |
"""
The more times we visit a hypothesis, the more we decrease its prior
TODO
----
* Try version where penalty decreases with time!
* This currently only extends LOTHypotheses, since we have to handle casting inside of h0 to WrapperClass.
... HOWEVER, we could make WrapperClass just dispatch the right methods if they don't exist
"""
from collections import Counter
from MetropolisHastings import MHSampler
class TabooMCMC(MHSampler):
    """
    An MCMC sampler that penalizes `self.posterior`
    Attributes
    ----------
    seen : Counter
        Keep track of all the samples we've drawn; this is a dictionary.
    penalty : float
        How much do we penalize for each sample?
    Note
    ----
    Requires storing of all hypotheses visited.
    """
    def __init__(self, h0, data, penalty=1.0, **kwargs):
        MHSampler.__init__(self, h0, data, **kwargs)
        self.penalty = penalty   # score added per prior visit (see NOTE below)
        self.seen = Counter()    # hypothesis -> number of times sampled

    def internal_sample(self, h):
        # Called by the base sampler for every sample drawn; record the visit.
        self.seen[h] += 1

    def compute_posterior(self, h, data):
        """
        Compute prior & likelihood for `h`, penalizing prior by how many samples have been generated so far.
        """
        # NOTE(review): with a positive `penalty` this ADDS score for
        # frequently-seen hypotheses, which rewards rather than "taboos"
        # them; the module docstring says visits should DECREASE the prior.
        # Confirm the intended sign convention for `penalty` before relying
        # on this.
        return self.seen[h] * self.penalty + h.compute_posterior(data)
if __name__ == "__main__":
    # Smoke test (Python 2): run a taboo-penalized chain on the
    # number-learning example and print each hypothesis' knower pattern
    # together with its posterior, prior, and likelihood.
    from LOTlib.Examples.Number.Model import *
    from LOTlib.Miscellaneous import q
    data = generate_data(500)
    h0 = NumberExpression(grammar)
    for h in TabooMCMC(h0, data, steps=10000):
        print q(get_knower_pattern(h)), h.posterior_score, h.prior, h.likelihood, q(h)
| ebigelow/LOTlib | LOTlib/Inference/Samplers/TabooMCMC.py | Python | gpl-3.0 | 1,569 | [
"VisIt"
] | 36a51b8cc4c6b6e858466b19439d23ae6573355a4934e979617fa9b617876cb2 |
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import math
def mass_frac_cut(datas, mn):
    """Keep only galaxies whose final age bin holds at least fraction `mn`
    of the galaxy's (recomputed) total mass.

    `datas` appears to be an astropy Table with columns AGEEND, MASS,
    SPECOBJID, M_STELLAR (the two-name indexing `datas['A','B']` is Table
    syntax) -- TODO confirm against callers.  Returns all rows of the
    surviving galaxies.
    """
    ageend=datas['AGEEND']
    print('I THINK I DID IT WOOHOO')
    # Per-row total mass rebuilt from the per-bin MASS column.
    TM=get_tot_mass(datas, 'MASS','SPECOBJID')
    #data1=datas[(ageend==np.max(ageend))&(datas['MASS']/datas['M_STELLAR']>=mn)]
    # Rows in the last age bin that pass the mass-fraction cut.
    data1=datas[(ageend==np.max(ageend))&(datas['MASS']/TM>=mn)]
    # Select every row belonging to a surviving galaxy, matched on the
    # (SPECOBJID, M_STELLAR) pair.
    a=np.in1d(datas['SPECOBJID','M_STELLAR'], data1['SPECOBJID','M_STELLAR'], assume_unique=False)
    newdat=datas[a==True]
    #print(newdat['SPECOBJID','AGESTART','MASS', 'M_STELLAR'])
    print('Length of data after mf mincut: ', len(newdat))
    return newdat
def mass_frac_cut1(datas, mn, get_opp=False): #original way
    """Like mass_frac_cut, but the fraction is MASS / M_STELLAR (the catalog
    stellar mass) rather than MASS / recomputed total.

    If get_opp is True, also return the rows of the rejected galaxies.
    """
    ageend=datas['AGEEND']
    print('I THINK I DID IT WOOHOO')
    #print(datas['SPECOBJID','AGESTART','MASS', 'M_STELLAR']) #check to make sure it has more than data1
    # Rows in the last age bin whose MASS/M_STELLAR fraction passes the cut.
    data1=datas[(ageend==np.max(ageend))&(datas['MASS']/datas['M_STELLAR']>=mn)]
    # Membership mask over all rows, matched on (SPECOBJID, M_STELLAR).
    a=np.in1d(datas['SPECOBJID','M_STELLAR'], data1['SPECOBJID','M_STELLAR'], assume_unique=False)
    newdat=datas[a==True]
    #databad=newdat[newdat['MASS']>newdat['M_STELLAR']]
    if get_opp==True:
        notdat=datas[a==False]
        print('Length of data after mf mincut: ', len(newdat))
        print('Length of other data: ', len(notdat))
        return newdat, notdat
    else:
        print('Length of data after mf mincut: ', len(newdat))
        return newdat
def divide_error(dat1, dat2, er1, er2):
    """Propagate uncertainty for the element-wise ratio dat1/dat2.

    For each pair (m, TM) with absolute errors (merr, TMerr), the error on
    m/TM is m/TM * sqrt((merr/m)**2 + (TMerr/TM)**2).  Any NaN results
    (e.g. from NaN inputs upstream) are replaced by 0.

    Parameters
    ----------
    dat1, dat2 : sequence of float
        Numerator and denominator values (same length).
    er1, er2 : sequence of float
        Absolute errors on dat1 and dat2 respectively.

    Returns
    -------
    numpy.ndarray
        Propagated errors, with NaN entries zeroed.
    """
    div_err= lambda m, TM, merr, TMerr: m/TM*math.sqrt((merr/m)**2+(TMerr/TM)**2)
    # Bug fix: the original accumulated into a plain Python list and then
    # did `errs[where_nans] = 0` with a boolean numpy array, which raises
    # TypeError.  Build an ndarray so boolean-mask assignment works.
    errs = np.array([div_err(m, TM, merr, TMerr)
                     for m, TM, merr, TMerr in zip(dat1, dat2, er1, er2)])
    where_nans= np.isnan(errs)
    print('Find nans', where_nans)
    errs[where_nans]=0
    print('errors= ', errs)
    return(errs)
def no_repeats(x):
    """Return the elements of `x` with duplicates removed, preserving
    first-seen order.

    Uses an auxiliary set for O(1) membership tests instead of the original
    O(n) list scan per element (O(n^2) overall).  Elements are assumed
    hashable (they are galaxy-ID strings in this script).
    """
    print('Length with repeats: ', len(x))
    seen = set()
    xs=[]
    for item in x:
        if item not in seen:
            seen.add(item)
            xs.append(item)
    print('Length without repeats: ', len(xs))
    return xs
def common_element(list1, list2):
    """Return a numpy array of the items of `list1` that also occur in
    `list2`, preserving the order (and any duplicates) of `list1`.

    Membership is checked against a set built once from `list2` (O(1) per
    test) instead of the original per-element list scan (O(n*m) overall).
    Elements are assumed hashable.
    """
    lookup = set(list2)
    result = np.array([el for el in list1 if el in lookup])
    return result
def test_sum(data, col1, col2, namecol):
    """Sanity check: for each galaxy, compare the summed per-bin masses
    (`col1`) against a total reconstructed from the distinct values of
    `col2`, and report galaxies whose bin sum exceeds that total.

    The 2/4-multipliers below assume each galaxy has 4 trials per bin and
    that distinct `col2` values repeat accordingly -- TODO confirm this
    layout against the data producer.
    """
    name=data[namecol]
    onename=no_repeats(name)
    badgal=[]                # galaxies whose bin-sum exceeds the total
    for n in onename:
        newd=data[name==n]   # all rows belonging to galaxy n
        #print(len(newd)) #why is this 64 and not 16--> 4 trials
        nmass=newd[col1]
        nTmass=newd[col2]
        sing,ind, inv, counts=np.unique(nTmass, return_index=True, return_counts=True,return_inverse=True)
        # Scale the distinct totals so they always represent 4 trials' worth.
        if len(sing)==2:
            totnums=sing*2
        elif len(sing)==1:
            totnums=sing*4
        elif len(sing)==3:
            totnums=sing
        elif len(sing)==4:
            totnums=sing
        else:
            # NOTE(review): in this branch `totnums` is not assigned, so the
            # line below reuses the previous iteration's value (or raises
            # NameError on the first galaxy) -- confirm this case is
            # impossible for real data.
            print('Number of different total masses is actually: ', len(sing))
        sumtotnums=np.sum(totnums)
        sum1=np.sum(nmass)
        print('Galaxy is ', n, 'with mass= ', sum1, ' of total mass= ', sumtotnums)
        if sum1>sumtotnums:
            badgal.append(n)
        #break
    print('These are the bad galaxies: ', len(badgal), len(onename))
def data_cut(data, col1, col2):
    """Split `data` on the condition col1 > col2 and return the rows where
    col1 <= col2 ("good" rows); row counts for both halves are printed."""
    primary = data[col1]
    reference = data[col2]
    databad = data[primary > reference]
    datagood = data[primary <= reference]
    #datagood=datas[mass<=Tmass]
    print('Number of bad lines: ', len(databad))
    print('Number of good lines: ', len(datagood))
    print('Number of total lines: ', len(data))
    #print(databad['SPECOBJID','AGESTART', 'AGEEND', col1, col2])
    return datagood
def get_tot_mass(data, col1, namecol):
    #gives the sum of the masses in each age bin
    # For each galaxy: sum its col1 values, divide by (rows/16) to average
    # over the repeated trials per age bin, and emit that total once per row
    # of the galaxy, so the returned array has one entry per input row.
    # NOTE(review): totals are emitted in np.unique's *sorted-name* order,
    # so the result aligns with `data` only if `data` is sorted by
    # `namecol` -- confirm against the callers (e.g. mass_frac_cut).
    Totmass=[]
    names=data[namecol]
    name=np.unique(names)
    for n in name:
        #newd=data[names==names[n]]
        newd=data[names==n]
        nmass=newd[col1]
        numdat=len(newd)
        div=numdat/16 #total number divided by 16 bins
        #print('divide= ', div)
        M=np.sum(nmass)
        MM=M/div
        #Totmass.append(MM)
        # NOTE(review): the inner loop variable shadows the galaxy name `n`;
        # harmless here because `n` is not used again, but fragile.
        for n in range(numdat):
            Totmass.append(MM)
        #break
    TM=np.array(Totmass)
    #print(TM)
    return TM
def get_tot_mass1(data, col1, namecol):
    #gives the sum of the masses in each age bin
    # Two equivalent-in-spirit implementations selected by `way`:
    #  way==1: iterate over rows, recomputing the galaxy total for each row
    #          (output aligned with `data`, but O(rows * rows-per-galaxy));
    #  else:   iterate over unique names, emitting each total once per row
    #          (faster but in sorted-name order, like get_tot_mass).
    Totmass=[]
    names=data[namecol]
    name, c=np.unique(names,return_counts=True)
    #print(c)
    #print(len(name))
    way=1
    if way==1:
        for n in range(len(names)):
            newd=data[names==names[n]]
            #print(names[n])
            nmass=newd[col1]
            numdat=len(newd)
            #print(newd)
            #print(numdat)
            div=numdat/16 #total number divided by 16 bins
            #print('divide= ', div)
            #print('masses= ', nmass)
            M=np.sum(nmass)
            MM=M/div
            Totmass.append(MM)
            #break
        TM=np.array(Totmass)
    else:
        for n in name:
            newd=data[names==n]
            nmass=newd[col1]
            numdat=len(newd)
            div=numdat/16
            M=np.sum(nmass)
            MM=M/div
            for i in range(numdat):
                Totmass.append(MM)
        TM=np.array(Totmass)
    #print(TM)
    return TM
def stack_mass3(ages, mass, Tmass, bb):
    """For each age-bin start in `bb`, average the mass fractions
    mass/Tmass over the rows whose age equals that bin.

    Returns (mean fraction per bin, spread per bin), where the spread is
    np.std(fractions)/N, matching the original implementation.
    """
    mass_sum = []
    std_err = []
    for start in bb:
        print('Age Start= ', start)
        in_bin = ages == start
        frac = mass[in_bin] / Tmass[in_bin]
        avg = np.mean(frac)
        std_err.append(np.std(frac) / len(frac))
        mass_sum.append(avg)
        print('mean=', avg)
    return mass_sum, std_err
| anewmark/galaxy_dark_matter | def_ages.py | Python | mit | 5,004 | [
"Galaxy"
] | 9ecebe3a6bd5aacf8486c6604b460e555eb9144f121daded31801dbb1828ba7e |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import re
import csv
import collections
import itertools
from io import open
import math
from six.moves import zip
import logging
from monty.json import MSONable, MontyDecoder
from monty.string import unicode2str
from monty.functools import lru_cache
import numpy as np
from scipy.spatial import ConvexHull
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element, DummySpecie, get_el_sp
from pymatgen.util.coord import Simplex, in_coord_list
from pymatgen.util.string import latexify
from pymatgen.util.plotting import pretty_plot
from pymatgen.analysis.reaction_calculator import Reaction, \
ReactionError
"""
This module defines tools to generate and analyze phase diagrams.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "May 16, 2011"
logger = logging.getLogger(__name__)
class PDEntry(MSONable):
    """
    An object encompassing all relevant data for phase diagrams.
    .. attribute:: composition
        The composition associated with the PDEntry.
    .. attribute:: energy
        The energy associated with the entry.
    .. attribute:: name
        A name for the entry. This is the string shown in the phase diagrams.
        By default, this is the reduced formula for the composition, but can be
        set to some other string for display purposes.
    .. attribute:: attribute
        A arbitrary attribute.
    Args:
        composition (Composition): Composition
        energy (float): Energy for composition.
        name (str): Optional parameter to name the entry. Defaults to the
            reduced chemical formula.
        attribute: Optional attribute of the entry. This can be used to
            specify that the entry is a newly found compound, or to specify a
            particular label for the entry, or else ... Used for further
            analysis and plotting purposes. An attribute can be anything
            but must be MSONable.
    """
    def __init__(self, composition, energy, name=None, attribute=None):
        self.energy = energy
        self.composition = Composition(composition)
        self.name = name if name else self.composition.reduced_formula
        self.attribute = attribute
    @property
    def energy_per_atom(self):
        """
        Returns the final energy per atom.
        """
        return self.energy / self.composition.num_atoms
    @property
    def is_element(self):
        """
        True if the entry is an element.
        """
        return self.composition.is_element
    def __repr__(self):
        return "PDEntry : {} with energy = {:.4f}".format(self.composition,
                                                          self.energy)
    def __str__(self):
        return self.__repr__()
    def as_dict(self):
        """Return an MSON-serializable dict representation of the entry."""
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "composition": self.composition.as_dict(),
                "energy": self.energy,
                "name": self.name,
                "attribute": self.attribute}
    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.as_dict() == other.as_dict()
        else:
            return False
    def __hash__(self):
        # NOTE(review): __eq__ compares serialized contents but __hash__ is
        # identity-based, so two equal entries can have different hashes.
        # This makes dict/set membership identity-like; confirm this is
        # intentional before relying on value-based lookups.
        return id(self)
    @classmethod
    def from_dict(cls, d):
        """Reconstruct a PDEntry from its as_dict() representation."""
        return cls(Composition(d["composition"]), d["energy"],
                   d["name"] if "name" in d else None,
                   d["attribute"] if "attribute" in d else None)
    @staticmethod
    def to_csv(filename, entries, latexify_names=False):
        """
        Exports PDEntries to a csv
        Args:
            filename: Filename to write to.
            entries: PDEntries to export.
            latexify_names: Format entry names to be LaTex compatible,
                e.g., Li_{2}O
        """
        elements = set()
        for entry in entries:
            elements.update(entry.composition.elements)
        # Columns ordered by electronegativity.
        elements = sorted(list(elements), key=lambda a: a.X)
        # NOTE(review): the file handle is never explicitly closed; a
        # with-block would be safer.
        writer = csv.writer(open(filename, "w"), delimiter=unicode2str(","),
                            quotechar=unicode2str("\""),
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerow(["Name"] + elements + ["Energy"])
        for entry in entries:
            row = [entry.name if not latexify_names
                   else re.sub(r"([0-9]+)", r"_{\1}", entry.name)]
            row.extend([entry.composition[el] for el in elements])
            row.append(entry.energy)
            writer.writerow(row)
    @staticmethod
    def from_csv(filename):
        """
        Imports PDEntries from a csv.
        Args:
            filename: Filename to import from.
        Returns:
            List of Elements, List of PDEntries
        """
        with open(filename, "r", encoding="utf-8") as f:
            reader = csv.reader(f, delimiter=unicode2str(","),
                                quotechar=unicode2str("\""),
                                quoting=csv.QUOTE_MINIMAL)
            entries = list()
            header_read = False
            elements = None
            for row in reader:
                if not header_read:
                    # First row: "Name", element symbols..., "Energy".
                    elements = row[1:(len(row) - 1)]
                    header_read = True
                else:
                    name = row[0]
                    energy = float(row[-1])
                    comp = dict()
                    for ind in range(1, len(row) - 1):
                        if float(row[ind]) > 0:
                            comp[Element(elements[ind - 1])] = float(row[ind])
                    entries.append(PDEntry(Composition(comp), energy, name))
        elements = [Element(el) for el in elements]
        return elements, entries
class GrandPotPDEntry(PDEntry):
    """
    A grand potential pd entry object encompassing all relevant data for phase
    diagrams. Chemical potentials are given as a element-chemical potential
    dict.
    Args:
        entry: A PDEntry-like object.
        chempots: Chemical potential specification as {Element: float}.
        name: Optional parameter to name the entry. Defaults to the reduced
            chemical formula of the original entry.
    """
    def __init__(self, entry, chempots, name=None):
        comp = entry.composition
        self.original_entry = entry
        self.original_comp = comp
        # Grand potential: E - sum(n_el * mu_el) over the open elements.
        grandpot = entry.energy - sum([comp[el] * pot
                                       for el, pot in chempots.items()])
        self.chempots = chempots
        # Composition restricted to the non-open (closed) elements.
        new_comp_map = {el: comp[el] for el in comp.elements
                        if el not in chempots}
        super(GrandPotPDEntry, self).__init__(new_comp_map, grandpot,
                                              entry.name)
        self.name = name if name else entry.name
    @property
    def is_element(self):
        """
        True if the entry is an element.
        """
        return self.original_comp.is_element
    def __repr__(self):
        chempot_str = " ".join(["mu_%s = %.4f" % (el, mu)
                                for el, mu in self.chempots.items()])
        return "GrandPotPDEntry with original composition " + \
            "{}, energy = {:.4f}, {}".format(self.original_entry.composition,
                                             self.original_entry.energy,
                                             chempot_str)
    def __str__(self):
        return self.__repr__()
    def as_dict(self):
        """Return an MSON-serializable dict representation of the entry."""
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "entry": self.original_entry.as_dict(),
                "chempots": {el.symbol: u for el, u in self.chempots.items()},
                "name": self.name}
    @classmethod
    def from_dict(cls, d):
        """Reconstruct a GrandPotPDEntry from its as_dict() representation."""
        chempots = {Element(symbol): u for symbol, u in d["chempots"].items()}
        entry = MontyDecoder().process_decoded(d["entry"])
        return cls(entry, chempots, d["name"])
    def __getattr__(self, a):
        """
        Delegate attribute to original entry if available.
        """
        if hasattr(self.original_entry, a):
            return getattr(self.original_entry, a)
        raise AttributeError(a)
class TransformedPDEntry(PDEntry):
    """
    This class repesents a TransformedPDEntry, which allows for a PDEntry to be
    transformed to a different composition coordinate space. It is used in the
    construction of phase diagrams that do not have elements as the terminal
    compositions.
    Args:
        comp (Composition): Transformed composition as a Composition.
        original_entry (PDEntry): Original entry that this entry arose from.
    """
    def __init__(self, comp, original_entry):
        # The transformed composition is paired with the *original* energy.
        super(TransformedPDEntry, self).__init__(comp, original_entry.energy)
        self.original_entry = original_entry
        self.name = original_entry.name
    def __getattr__(self, a):
        """
        Delegate attribute to original entry if available.
        """
        if hasattr(self.original_entry, a):
            return getattr(self.original_entry, a)
        raise AttributeError(a)
    def __repr__(self):
        output = ["TransformedPDEntry {}".format(self.composition),
                  " with original composition {}"
                      .format(self.original_entry.composition),
                  ", E = {:.4f}".format(self.original_entry.energy)]
        return "".join(output)
    def __str__(self):
        return self.__repr__()
    def as_dict(self):
        """Return an MSON-serializable dict representation of the entry."""
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "entry": self.original_entry.as_dict(),
                "composition": self.composition}
    @classmethod
    def from_dict(cls, d):
        """Reconstruct a TransformedPDEntry from its as_dict() form."""
        entry = MontyDecoder().process_decoded(d["entry"])
        return cls(d["composition"], entry)
class PhaseDiagram(MSONable):
"""
Simple phase diagram class taking in elements and entries as inputs.
The algorithm is based on the work in the following papers:
1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
doi:10.1021/cm702327g
2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
doi:10.1016/j.elecom.2010.01.010
.. attribute: elements:
Elements in the phase diagram.
..attribute: all_entries
All entries provided for Phase Diagram construction. Note that this
does not mean that all these entries are actually used in the phase
diagram. For example, this includes the positive formation energy
entries that are filtered out before Phase Diagram construction.
.. attribute: qhull_data
Data used in the convex hull operation. This is essentially a matrix of
composition data and energy per atom values created from qhull_entries.
.. attribute: qhull_entries:
Actual entries used in convex hull. Excludes all positive formation
energy entries.
.. attribute: dim
The dimensionality of the phase diagram.
.. attribute: facets
Facets of the phase diagram in the form of [[1,2,3],[4,5,6]...].
For a ternary, it is the indices (references to qhull_entries and
qhull_data) for the vertices of the phase triangles. Similarly
extended to higher D simplices for higher dimensions.
.. attribute: el_refs:
List of elemental references for the phase diagrams. These are
entries corresponding to the lowest energy element entries for simple
compositional phase diagrams.
.. attribute: simplices:
The simplices of the phase diagram as a list of np.ndarray, i.e.,
the list of stable compositional coordinates in the phase diagram.
"""
# Tolerance for determining if formation energy is positive.
formation_energy_tol = 1e-11
numerical_tol = 1e-8
def __init__(self, entries, elements=None):
"""
Standard constructor for phase diagram.
Args:
entries ([PDEntry]): A list of PDEntry-like objects having an
energy, energy_per_atom and composition.
elements ([Element]): Optional list of elements in the phase
diagram. If set to None, the elements are determined from
the the entries themselves.
"""
if elements is None:
elements = set()
for entry in entries:
elements.update(entry.composition.elements)
elements = list(elements)
dim = len(elements)
get_reduced_comp = lambda e: e.composition.reduced_composition
entries = sorted(entries, key=get_reduced_comp)
el_refs = {}
min_entries = []
all_entries = []
for c, g in itertools.groupby(entries, key=get_reduced_comp):
g = list(g)
min_entry = min(g, key=lambda e: e.energy_per_atom)
if c.is_element:
el_refs[c.elements[0]] = min_entry
min_entries.append(min_entry)
all_entries.extend(g)
if len(el_refs) != dim:
raise PhaseDiagramError(
"There are no entries associated with a terminal element!.")
data = np.array([
[e.composition.get_atomic_fraction(el) for el in elements] + [
e.energy_per_atom]
for e in min_entries
])
# Use only entries with negative formation energy
vec = [el_refs[el].energy_per_atom for el in elements] + [-1]
form_e = -np.dot(data, vec)
inds = np.where(form_e < -self.formation_energy_tol)[0].tolist()
# Add the elemental references
inds.extend([min_entries.index(el) for el in el_refs.values()])
qhull_entries = [min_entries[i] for i in inds]
qhull_data = data[inds][:, 1:]
# Add an extra point to enforce full dimensionality.
# This point will be present in all upper hull facets.
extra_point = np.zeros(dim) + 1 / dim
extra_point[-1] = np.max(qhull_data) + 1
qhull_data = np.concatenate([qhull_data, [extra_point]], axis=0)
if dim == 1:
self.facets = [qhull_data.argmin(axis=0)]
else:
facets = get_facets(qhull_data)
finalfacets = []
for facet in facets:
# Skip facets that include the extra point
if max(facet) == len(qhull_data) - 1:
continue
m = qhull_data[facet]
m[:, -1] = 1
if abs(np.linalg.det(m)) > 1e-14:
finalfacets.append(facet)
self.facets = finalfacets
self.simplexes = [Simplex(qhull_data[f, :-1]) for f in self.facets]
self.all_entries = all_entries
self.qhull_data = qhull_data
self.dim = dim
self.el_refs = el_refs
self.elements = elements
self.qhull_entries = qhull_entries
self._stable_entries = set(self.qhull_entries[i] for i in
set(itertools.chain(*self.facets)))
def pd_coords(self, comp):
"""
The phase diagram is generated in a reduced dimensional space
(n_elements - 1). This function returns the coordinates in that space.
These coordinates are compatible with the stored simplex objects.
"""
if set(comp.elements).difference(self.elements):
raise ValueError('{} has elements not in the phase diagram {}'
''.format(comp, self.elements))
return np.array(
[comp.get_atomic_fraction(el) for el in self.elements[1:]])
@property
def all_entries_hulldata(self):
data = []
for entry in self.all_entries:
comp = entry.composition
row = [comp.get_atomic_fraction(el) for el in self.elements]
row.append(entry.energy_per_atom)
data.append(row)
return np.array(data)[:, 1:]
@property
def unstable_entries(self):
"""
Entries that are unstable in the phase diagram. Includes positive
formation energy entries.
"""
return [e for e in self.all_entries if e not in self.stable_entries]
@property
def stable_entries(self):
"""
Returns the stable entries in the phase diagram.
"""
return self._stable_entries
def get_form_energy(self, entry):
"""
Returns the formation energy for an entry (NOT normalized) from the
elemental references.
Args:
entry: A PDEntry-like object.
Returns:
Formation energy from the elemental references.
"""
c = entry.composition
return entry.energy - sum([c[el] * self.el_refs[el].energy_per_atom
for el in c.elements])
def get_form_energy_per_atom(self, entry):
"""
Returns the formation energy per atom for an entry from the
elemental references.
Args:
entry: An PDEntry-like object
Returns:
Formation energy **per atom** from the elemental references.
"""
return self.get_form_energy(entry) / entry.composition.num_atoms
def __repr__(self):
return self.__str__()
def __str__(self):
symbols = [el.symbol for el in self.elements]
output = ["{} phase diagram".format("-".join(symbols)),
"{} stable phases: ".format(len(self.stable_entries)),
", ".join([entry.name
for entry in self.stable_entries])]
return "\n".join(output)
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"all_entries": [e.as_dict() for e in self.all_entries],
"elements": [e.as_dict() for e in self.elements]}
@classmethod
def from_dict(cls, d):
entries = [PDEntry.from_dict(dd) for dd in d["all_entries"]]
elements = [Element.from_dict(dd) for dd in d["elements"]]
return cls(entries, elements)
@lru_cache(1)
def _get_facet_and_simplex(self, comp):
"""
Get any facet that a composition falls into. Cached so successive
calls at same composition are fast.
"""
c = self.pd_coords(comp)
for f, s in zip(self.facets, self.simplexes):
if s.in_simplex(c, PhaseDiagram.numerical_tol / 10):
return f, s
raise RuntimeError("No facet found for comp = {}".format(comp))
def _get_facet_chempots(self, facet):
"""
Calculates the chemical potentials for each element within a facet.
Args:
facet: Facet of the phase diagram.
Returns:
{ element: chempot } for all elements in the phase diagram.
"""
complist = [self.qhull_entries[i].composition for i in facet]
energylist = [self.qhull_entries[i].energy_per_atom for i in facet]
m = [[c.get_atomic_fraction(e) for e in self.elements] for c in
complist]
chempots = np.linalg.solve(m, energylist)
return dict(zip(self.elements, chempots))
def get_decomposition(self, comp):
"""
Provides the decomposition at a particular composition.
Args:
comp: A composition
Returns:
Decomposition as a dict of {Entry: amount}
"""
facet, simplex = self._get_facet_and_simplex(comp)
decomp_amts = simplex.bary_coords(self.pd_coords(comp))
return {self.qhull_entries[f]: amt
for f, amt in zip(facet, decomp_amts)
if abs(amt) > PhaseDiagram.numerical_tol}
def get_hull_energy(self, comp):
"""
Args:
comp (Composition): Input composition
Returns:
Energy of lowest energy equilibrium at desired composition. Not
normalized by atoms, i.e. E(Li4O2) = 2 * E(Li2O)
"""
e = 0
for k, v in self.get_decomposition(comp).items():
e += k.energy_per_atom * v
return e * comp.num_atoms
def get_decomp_and_e_above_hull(self, entry, allow_negative=False):
"""
Provides the decomposition and energy above convex hull for an entry.
Due to caching, can be much faster if entries with the same composition
are processed together.
Args:
entry: A PDEntry like object
allow_negative: Whether to allow negative e_above_hulls. Used to
calculate equilibrium reaction energies. Defaults to False.
Returns:
(decomp, energy above convex hull) Stable entries should have
energy above hull of 0. The decomposition is provided as a dict of
{Entry: amount}.
"""
if entry in self.stable_entries:
return {entry: 1}, 0
comp = entry.composition
facet, simplex = self._get_facet_and_simplex(comp)
decomp_amts = simplex.bary_coords(self.pd_coords(comp))
decomp = {self.qhull_entries[f]: amt
for f, amt in zip(facet, decomp_amts)
if abs(amt) > PhaseDiagram.numerical_tol}
energies = [self.qhull_entries[i].energy_per_atom for i in facet]
ehull = entry.energy_per_atom - np.dot(decomp_amts, energies)
if allow_negative or ehull >= -PhaseDiagram.numerical_tol:
return decomp, ehull
raise ValueError("No valid decomp found!")
def get_e_above_hull(self, entry):
"""
Provides the energy above convex hull for an entry
Args:
entry: A PDEntry like object
Returns:
Energy above convex hull of entry. Stable entries should have
energy above hull of 0.
"""
return self.get_decomp_and_e_above_hull(entry)[1]
def get_equilibrium_reaction_energy(self, entry):
"""
Provides the reaction energy of a stable entry from the neighboring
equilibrium stable entries (also known as the inverse distance to
hull).
Args:
entry: A PDEntry like object
Returns:
Equilibrium reaction energy of entry. Stable entries should have
equilibrium reaction energy <= 0.
"""
if entry not in self.stable_entries:
raise ValueError("Equilibrium reaction energy is available only "
"for stable entries.")
if entry.is_element:
return 0
entries = [e for e in self.stable_entries if e != entry]
modpd = PhaseDiagram(entries, self.elements)
return modpd.get_decomp_and_e_above_hull(entry,
allow_negative=True)[1]
def get_composition_chempots(self, comp):
facet = self._get_facet_and_simplex(comp)[0]
return self._get_facet_chempots(facet)
def get_transition_chempots(self, element):
"""
Get the critical chemical potentials for an element in the Phase
Diagram.
Args:
element: An element. Has to be in the PD in the first place.
Returns:
A sorted sequence of critical chemical potentials, from less
negative to more negative.
"""
if element not in self.elements:
raise ValueError("get_transition_chempots can only be called with "
"elements in the phase diagram.")
critical_chempots = []
for facet in self.facets:
chempots = self._get_facet_chempots(facet)
critical_chempots.append(chempots[element])
clean_pots = []
for c in sorted(critical_chempots):
if len(clean_pots) == 0:
clean_pots.append(c)
else:
if abs(c - clean_pots[-1]) > PhaseDiagram.numerical_tol:
clean_pots.append(c)
clean_pots.reverse()
return tuple(clean_pots)
def get_critical_compositions(self, comp1, comp2):
"""
Get the critical compositions along the tieline between two
compositions. I.e. where the decomposition products change.
The endpoints are also returned.
Args:
comp1, comp2 (Composition): compositions that define the tieline
Returns:
[(Composition)]: list of critical compositions. All are of
the form x * comp1 + (1-x) * comp2
"""
n1 = comp1.num_atoms
n2 = comp2.num_atoms
pd_els = self.elements
# the reduced dimensionality Simplexes don't use the
# first element in the PD
c1 = self.pd_coords(comp1)
c2 = self.pd_coords(comp2)
# none of the projections work if c1 == c2, so just return *copies*
# of the inputs
if np.all(c1 == c2):
return [comp1.copy(), comp2.copy()]
intersections = [c1, c2]
for sc in self.simplexes:
intersections.extend(sc.line_intersection(c1, c2))
intersections = np.array(intersections)
# find position along line
l = (c2 - c1)
l /= np.sum(l ** 2) ** 0.5
proj = np.dot(intersections - c1, l)
# only take compositions between endpoints
proj = proj[np.logical_and(proj > -self.numerical_tol,
proj < proj[1] + self.numerical_tol)]
proj.sort()
# only unique compositions
valid = np.ones(len(proj), dtype=np.bool)
valid[1:] = proj[1:] > proj[:-1] + self.numerical_tol
proj = proj[valid]
ints = c1 + l * proj[:, None]
# reconstruct full-dimensional composition array
cs = np.concatenate([np.array([1 - np.sum(ints, axis=-1)]).T,
ints], axis=-1)
# mixing fraction when compositions are normalized
x = proj / np.dot(c2 - c1, l)
# mixing fraction when compositions are not normalized
x_unnormalized = x * n1 / (n2 + x * (n1 - n2))
num_atoms = n1 + (n2 - n1) * x_unnormalized
cs *= num_atoms[:, None]
return [Composition((c, v) for c, v in zip(pd_els, m)) for m in cs]
def get_element_profile(self, element, comp, comp_tol=1e-5):
"""
Provides the element evolution data for a composition.
For example, can be used to analyze Li conversion voltages by varying
uLi and looking at the phases formed. Also can be used to analyze O2
evolution by varying uO2.
Args:
element: An element. Must be in the phase diagram.
comp: A Composition
comp_tol: The tolerance to use when calculating decompositions.
Phases with amounts less than this tolerance are excluded.
Defaults to 1e-5.
Returns:
Evolution data as a list of dictionaries of the following format:
[ {'chempot': -10.487582010000001, 'evolution': -2.0,
'reaction': Reaction Object], ...]
"""
element = get_el_sp(element)
element = Element(element.symbol)
if element not in self.elements:
raise ValueError("get_transition_chempots can only be called with"
" elements in the phase diagram.")
gccomp = Composition({el: amt for el, amt in comp.items()
if el != element})
elref = self.el_refs[element]
elcomp = Composition(element.symbol)
evolution = []
for cc in self.get_critical_compositions(elcomp, gccomp)[1:]:
decomp_entries = self.get_decomposition(cc).keys()
decomp = [k.composition for k in decomp_entries]
rxn = Reaction([comp], decomp + [elcomp])
rxn.normalize_to(comp)
c = self.get_composition_chempots(cc + elcomp * 1e-5)[element]
amt = -rxn.coeffs[rxn.all_comp.index(elcomp)]
evolution.append({'chempot': c,
'evolution': amt,
'element_reference': elref,
'reaction': rxn, 'entries': decomp_entries})
return evolution
def get_chempot_range_map(self, elements, referenced=True, joggle=True):
"""
Returns a chemical potential range map for each stable entry.
Args:
elements: Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges
of all Li-Co-O phases wrt to uLi and uO, you will supply
[Element("Li"), Element("O")]
referenced: If True, gives the results with a reference being the
energy of the elemental phase. If False, gives absolute values.
joggle (boolean): Whether to joggle the input to avoid precision
errors.
Returns:
Returns a dict of the form {entry: [simplices]}. The list of
simplices are the sides of the N-1 dim polytope bounding the
allowable chemical potential range of each entry.
"""
all_chempots = []
pd = self
facets = pd.facets
for facet in facets:
chempots = self._get_facet_chempots(facet)
all_chempots.append([chempots[el] for el in pd.elements])
inds = [pd.elements.index(el) for el in elements]
el_energies = {el: 0.0 for el in elements}
if referenced:
el_energies = {el: pd.el_refs[el].energy_per_atom
for el in elements}
chempot_ranges = collections.defaultdict(list)
vertices = [list(range(len(self.elements)))]
if len(all_chempots) > len(self.elements):
vertices = get_facets(all_chempots, joggle=joggle)
for ufacet in vertices:
for combi in itertools.combinations(ufacet, 2):
data1 = facets[combi[0]]
data2 = facets[combi[1]]
common_ent_ind = set(data1).intersection(set(data2))
if len(common_ent_ind) == len(elements):
common_entries = [pd.qhull_entries[i]
for i in common_ent_ind]
data = np.array([[all_chempots[i][j]
- el_energies[pd.elements[j]]
for j in inds] for i in combi])
sim = Simplex(data)
for entry in common_entries:
chempot_ranges[entry].append(sim)
return chempot_ranges
def getmu_vertices_stability_phase(self, target_comp, dep_elt, tol_en=1e-2):
"""
returns a set of chemical potentials corresponding to the vertices of the simplex
in the chemical potential phase diagram.
The simplex is built using all elements in the target_composition except dep_elt.
The chemical potential of dep_elt is computed from the target composition energy.
This method is useful to get the limiting conditions for
defects computations for instance.
Args:
target_comp: A Composition object
dep_elt: the element for which the chemical potential is computed from the energy of
the stable phase at the target composition
tol_en: a tolerance on the energy to set
Returns:
[{Element:mu}]: An array of conditions on simplex vertices for
which each element has a chemical potential set to a given
value. "absolute" values (i.e., not referenced to element energies)
"""
muref = np.array([self.el_refs[e].energy_per_atom
for e in self.elements if e != dep_elt])
chempot_ranges = self.get_chempot_range_map(
[e for e in self.elements if e != dep_elt])
for e in self.elements:
if not e in target_comp.elements:
target_comp = target_comp + Composition({e: 0.0})
coeff = [-target_comp[e] for e in self.elements if e != dep_elt]
for e in chempot_ranges.keys():
if e.composition.reduced_composition == \
target_comp.reduced_composition:
multiplicator = e.composition[dep_elt] / target_comp[dep_elt]
ef = e.energy / multiplicator
all_coords = []
for s in chempot_ranges[e]:
for v in s._coords:
elts = [e for e in self.elements if e != dep_elt]
res = {}
for i in range(len(elts)):
res[elts[i]] = v[i] + muref[i]
res[dep_elt] = (np.dot(v + muref, coeff) + ef) / \
target_comp[dep_elt]
already_in = False
for di in all_coords:
dict_equals = True
for k in di:
if abs(di[k] - res[k]) > tol_en:
dict_equals = False
break
if dict_equals:
already_in = True
break
if not already_in:
all_coords.append(res)
return all_coords
def get_chempot_range_stability_phase(self, target_comp, open_elt):
    """
    returns a set of chemical potentials corresponding to the max and min
    chemical potential of the open element for a given composition. It is
    quite common to have for instance a ternary oxide (e.g., ABO3) for
    which you want to know what are the A and B chemical potential leading
    to the highest and lowest oxygen chemical potential (reducing and
    oxidizing conditions). This is useful for defect computations.

    Args:
        target_comp: A Composition object
        open_elt: Element that you want to constrain to be max or min

    Returns:
        {Element:(mu_min,mu_max)}: Chemical potentials are given in
        "absolute" values (i.e., not referenced to 0)
    """
    # Elemental reference energies for the independent elements (all but
    # open_elt), in the same order used for coeff/elts below.
    muref = np.array([self.el_refs[e].energy_per_atom
                      for e in self.elements if e != open_elt])
    chempot_ranges = self.get_chempot_range_map(
        [e for e in self.elements if e != open_elt])
    # Pad the target composition with zero amounts so every element of the
    # phase diagram can be looked up by key.
    for e in self.elements:
        if e not in target_comp.elements:
            target_comp = target_comp + Composition({e: 0.0})
    coeff = [-target_comp[e] for e in self.elements if e != open_elt]
    max_open = -float('inf')
    min_open = float('inf')
    max_mus = None
    min_mus = None
    for e in chempot_ranges.keys():
        if e.composition.reduced_composition == \
                target_comp.reduced_composition:
            multiplicator = e.composition[open_elt] / target_comp[open_elt]
            ef = e.energy / multiplicator
            for s in chempot_ranges[e]:
                for v in s._coords:
                    # Fix: compute the open-element chemical potential once
                    # per vertex (the original repeated this dot product up
                    # to four times), and drop the unused all_coords list.
                    mu_open = (np.dot(v + muref, coeff) + ef) / \
                        target_comp[open_elt]
                    if mu_open > max_open:
                        max_open = mu_open
                        max_mus = v
                    if mu_open < min_open:
                        min_open = mu_open
                        min_mus = v
    elts = [e for e in self.elements if e != open_elt]
    # NOTE(review): if no stable entry matches target_comp, min_mus/max_mus
    # stay None and the indexing below raises TypeError — same behavior as
    # the original; confirm whether a clearer error is wanted.
    res = {}
    for i in range(len(elts)):
        res[elts[i]] = (min_mus[i] + muref[i], max_mus[i] + muref[i])
    res[open_elt] = (min_open, max_open)
    return res
class GrandPotentialPhaseDiagram(PhaseDiagram):
    """
    A class representing a Grand potential phase diagram. Grand potential
    phase diagrams are essentially phase diagrams that are open to one or
    more components. To construct such phase diagrams, the relevant free
    energy is the grand potential, which can be written as the Legendre
    transform of the Gibbs free energy as follows

    Grand potential = G - u_X N_X

    The algorithm is based on the work in the following papers:

    1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram
       from First Principles Calculations. Chem. Mater., 2008, 20(5),
       1798-1807. doi:10.1021/cm702327g

    2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal
       stabilities of delithiated olivine MPO4 (M=Fe, Mn) cathodes
       investigated using first principles calculations. Electrochem.
       Comm., 2010, 12(3), 427-430. doi:10.1016/j.elecom.2010.01.010
    """

    def __init__(self, entries, chempots, elements=None):
        """
        Standard constructor for grand potential phase diagram.

        Args:
            entries ([PDEntry]): A list of PDEntry-like objects having an
                energy, energy_per_atom and composition.
            chempots {Element: float}: Specify the chemical potentials
                of the open elements.
            elements ([Element]): Optional list of elements in the phase
                diagram. If set to None, the elements are determined from
                the entries themselves.
        """
        if elements is None:
            # Collect every element that appears in the input entries.
            elements = set()
            for entry in entries:
                elements.update(entry.composition.elements)
        self.chempots = {get_el_sp(el): u for el, u in chempots.items()}
        # The open (chempot-controlled) elements are removed from the
        # compositional space of the diagram.
        elements = set(elements).difference(self.chempots.keys())
        # Keep only entries containing at least one closed element, wrapped
        # so their energies become grand potentials.
        all_entries = [GrandPotPDEntry(e, self.chempots) for e in entries
                       if set(e.composition.elements).intersection(elements)]
        super(GrandPotentialPhaseDiagram, self).__init__(all_entries,
                                                         elements)

    def __str__(self):
        chemsys = "-".join(el.symbol for el in self.elements)
        chempot_str = ", ".join("u{}={}".format(el, v)
                                for el, v in self.chempots.items())
        out_lines = [
            "{} grand potential phase diagram with ".format(chemsys)
            + chempot_str,
            "{} stable phases: ".format(len(self.stable_entries)),
            ", ".join(entry.name for entry in self.stable_entries),
        ]
        return "\n".join(out_lines)

    def as_dict(self):
        """Returns an MSONable dict representation of the diagram."""
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "all_entries": [e.as_dict() for e in self.all_entries],
                "chempots": self.chempots,
                "elements": [e.as_dict() for e in self.elements]}

    @classmethod
    def from_dict(cls, d):
        """Reconstructs the diagram from its as_dict() representation."""
        dec = MontyDecoder()
        decoded_entries = dec.process_decoded(d["all_entries"])
        decoded_elements = dec.process_decoded(d["elements"])
        return cls(decoded_entries, d["chempots"], decoded_elements)
class CompoundPhaseDiagram(PhaseDiagram):
    """
    Generates phase diagrams from compounds as terminations instead of
    elements.
    """

    # Tolerance for determining if amount of a composition is positive.
    amount_tol = 1e-5

    def __init__(self, entries, terminal_compositions,
                 normalize_terminal_compositions=True):
        """
        Initializes a CompoundPhaseDiagram.

        Args:
            entries ([PDEntry]): Sequence of input entries. For example,
                if you want a Li2O-P2O5 phase diagram, you might have all
                Li-P-O entries as an input.
            terminal_compositions ([Composition]): Terminal compositions of
                phase space. In the Li2O-P2O5 example, these will be the
                Li2O and P2O5 compositions.
            normalize_terminal_compositions (bool): Whether to normalize the
                terminal compositions to a per atom basis. If normalized,
                the energy above hulls will be consistent
                for comparison across systems. Non-normalized terminals are
                more intuitive in terms of compositional breakdowns.
        """
        self.original_entries = entries
        self.terminal_compositions = terminal_compositions
        self.normalize_terminals = normalize_terminal_compositions
        # Re-express all entries in the terminal-composition coordinate
        # system; entries outside the space are dropped here.
        (pentries, species_mapping) = \
            self.transform_entries(entries, terminal_compositions)
        self.species_mapping = species_mapping
        # The dummy species standing in for the terminals become the
        # "elements" of the underlying elemental phase diagram.
        super(CompoundPhaseDiagram, self).__init__(
            pentries, elements=species_mapping.values())

    def transform_entries(self, entries, terminal_compositions):
        """
        Method to transform all entries to the composition coordinate in the
        terminal compositions. If the entry does not fall within the space
        defined by the terminal compositions, they are excluded. For example,
        Li3PO4 is mapped into a Li2O:1.5, P2O5:0.5 composition. The terminal
        compositions are represented by DummySpecies.

        Args:
            entries: Sequence of all input entries
            terminal_compositions: Terminal compositions of phase space.

        Returns:
            Sequence of TransformedPDEntries falling within the phase space.
        """
        new_entries = []
        if self.normalize_terminals:
            fractional_comp = [c.fractional_composition
                               for c in terminal_compositions]
        else:
            fractional_comp = terminal_compositions
        # Map terminal compositions to unique dummy species.
        # chr(102) == "f", so the dummies are named Xf, Xg, Xh, ... —
        # presumably chosen to avoid clashing with real element symbols;
        # TODO confirm.
        sp_mapping = collections.OrderedDict()
        for i, comp in enumerate(fractional_comp):
            sp_mapping[comp] = DummySpecie("X" + chr(102 + i))
        for entry in entries:
            try:
                # Balance terminals -> entry; the (negated) reactant
                # coefficients give the entry's coordinates in terminal
                # space.
                rxn = Reaction(fractional_comp, [entry.composition])
                rxn.normalize_to(entry.composition)
                # We only allow reactions that have positive amounts of
                # reactants.
                if all([rxn.get_coeff(comp) <= CompoundPhaseDiagram.amount_tol
                        for comp in fractional_comp]):
                    newcomp = {sp_mapping[comp]: -rxn.get_coeff(comp)
                               for comp in fractional_comp}
                    # Drop terminals present only in negligible amounts.
                    newcomp = {k: v for k, v in newcomp.items()
                               if v > CompoundPhaseDiagram.amount_tol}
                    transformed_entry = \
                        TransformedPDEntry(Composition(newcomp), entry)
                    new_entries.append(transformed_entry)
            except ReactionError:
                # If the reaction can't be balanced, the entry does not fall
                # into the phase space. We ignore them.
                pass
        return new_entries, sp_mapping

    def as_dict(self):
        """Returns an MSONable dict representation of the diagram."""
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "original_entries": [e.as_dict() for e in self.original_entries],
            "terminal_compositions": [c.as_dict()
                                      for c in self.terminal_compositions],
            "normalize_terminal_compositions":
                self.normalize_terminals}

    @classmethod
    def from_dict(cls, d):
        """Reconstructs the diagram from its as_dict() representation."""
        dec = MontyDecoder()
        entries = dec.process_decoded(d["original_entries"])
        terminal_compositions = dec.process_decoded(d["terminal_compositions"])
        return cls(entries, terminal_compositions,
                   d["normalize_terminal_compositions"])
class ReactionDiagram(object):
    """
    Analyzes the possible reactions between a pair of compounds, e.g.,
    an electrolyte and an electrode, using the convex hull of all
    competing entries.
    """

    def __init__(self, entry1, entry2, all_entries, tol=1e-4,
                 float_fmt="%.4f"):
        """
        Analyzes the possible reactions between a pair of compounds, e.g.,
        an electrolyte and an electrode.

        Args:
            entry1 (ComputedEntry): Entry for 1st component. Note that
                corrections, if any, must already be pre-applied. This is to
                give flexibility for different kinds of corrections, e.g.,
                if a particular entry is fitted to an experimental data (such
                as EC molecule).
            entry2 (ComputedEntry): Entry for 2nd component. Note that
                corrections must already be pre-applied. This is to
                give flexibility for different kinds of corrections, e.g.,
                if a particular entry is fitted to an experimental data (such
                as EC molecule).
            all_entries ([ComputedEntry]): All other entries to be
                considered in the analysis. Note that corrections, if any,
                must already be pre-applied.
            tol (float): Tolerance to be used to determine validity of reaction.
            float_fmt (str): Formatting string to be applied to all floats.
                Determines number of decimal places in reaction string.
        """
        elements = set()
        for e in [entry1, entry2]:
            elements.update([el.symbol for el in e.composition.elements])
        elements = tuple(elements)  # Fix elements to ensure order.
        # Atomic-fraction vectors of the two terminals, in `elements` order.
        comp_vec1 = np.array([entry1.composition.get_atomic_fraction(el)
                              for el in elements])
        comp_vec2 = np.array([entry2.composition.get_atomic_fraction(el)
                              for el in elements])
        r1 = entry1.composition.reduced_composition
        r2 = entry2.composition.reduced_composition
        logger.debug("%d total entries." % len(all_entries))
        pd = PhaseDiagram(all_entries + [entry1, entry2])
        terminal_formulas = [entry1.composition.reduced_formula,
                             entry2.composition.reduced_formula]
        logger.debug("%d stable entries" % len(pd.stable_entries))
        logger.debug("%d facets" % len(pd.facets))
        logger.debug("%d qhull_entries" % len(pd.qhull_entries))
        rxn_entries = []
        done = []  # (c1, c2) pairs already seen, to skip duplicate reactions
        fmt = lambda fl: float_fmt % fl
        for facet in pd.facets:
            # Consider every sub-face of the facet as a candidate product
            # set, skipping faces that contain a terminal itself.
            for face in itertools.combinations(facet, len(facet) - 1):
                face_entries = [pd.qhull_entries[i] for i in face]
                if any([e.composition.reduced_formula in terminal_formulas
                        for e in face_entries]):
                    continue
                try:
                    # Solve m @ coeffs = comp_vec2, where the columns of m
                    # are the face-entry compositions plus the direction
                    # comp_vec2 - comp_vec1. The last coefficient x is the
                    # mixing ratio of the two terminals.
                    m = []
                    for e in face_entries:
                        m.append([e.composition.get_atomic_fraction(el)
                                  for el in elements])
                    m.append(comp_vec2 - comp_vec1)
                    m = np.array(m).T
                    coeffs = np.linalg.solve(m, comp_vec2)
                    x = coeffs[-1]
                    # Valid only if all product amounts are (near) positive,
                    # product amounts sum to ~1, and x is strictly interior.
                    if all([c >= -tol for c in coeffs]) and \
                            (abs(sum(coeffs[:-1]) - 1) < tol) and \
                            (tol < x < 1 - tol):
                        c1 = x / r1.num_atoms
                        c2 = (1 - x) / r2.num_atoms
                        factor = 1 / (c1 + c2)
                        c1 *= factor
                        c2 *= factor
                        # Avoid duplicate reactions.
                        if any([np.allclose([c1, c2], cc) for cc in done]):
                            continue
                        done.append((c1, c2))
                        rxn_str = "%s %s + %s %s -> " % (
                            fmt(c1), r1.reduced_formula,
                            fmt(c2), r2.reduced_formula)
                        products = []
                        # Reaction energy per atom: products minus the
                        # weighted reactant energies.
                        energy = - (x * entry1.energy_per_atom +
                                    (1 - x) * entry2.energy_per_atom)
                        for c, e in zip(coeffs[:-1], face_entries):
                            if c > tol:
                                r = e.composition.reduced_composition
                                products.append("%s %s" % (
                                    fmt(c / r.num_atoms * factor),
                                    r.reduced_formula))
                                energy += c * e.energy_per_atom
                        rxn_str += " + ".join(products)
                        comp = x * comp_vec1 + (1 - x) * comp_vec2
                        entry = PDEntry(
                            Composition(dict(zip(elements, comp))),
                            energy=energy, attribute=rxn_str)
                        rxn_entries.append(entry)
                except np.linalg.LinAlgError as ex:
                    # Singular system: the face cannot balance the mixture;
                    # log and move on.
                    logger.debug("Reactants = %s" % (", ".join([
                        entry1.composition.reduced_formula,
                        entry2.composition.reduced_formula])))
                    logger.debug("Products = %s" % (
                        ", ".join([e.composition.reduced_formula
                                   for e in face_entries])))
        rxn_entries = sorted(rxn_entries, key=lambda e: e.name, reverse=True)
        self.entry1 = entry1
        self.entry2 = entry2
        self.rxn_entries = rxn_entries
        # Map short numeric labels ("1", "2", ...) to reaction strings, and
        # rename the entries accordingly for compact plotting.
        self.labels = collections.OrderedDict()
        for i, e in enumerate(rxn_entries):
            self.labels[str(i + 1)] = e.attribute
            e.name = str(i + 1)
        self.all_entries = all_entries
        self.pd = pd

    def get_compound_pd(self):
        """
        Get the CompoundPhaseDiagram object, which can then be used for
        plotting.

        Returns:
            (CompoundPhaseDiagram)
        """
        # For this plot, since the reactions are reported in formation
        # energies, we need to set the energies of the terminal compositions
        # to 0. So we make create copies with 0 energy.
        entry1 = PDEntry(self.entry1.composition, 0)
        entry2 = PDEntry(self.entry2.composition, 0)
        cpd = CompoundPhaseDiagram(
            self.rxn_entries + [entry1, entry2],
            [Composition(entry1.composition.reduced_formula),
             Composition(entry2.composition.reduced_formula)],
            normalize_terminal_compositions=False)
        return cpd
class PhaseDiagramError(Exception):
    """
    An exception class for errors encountered during Phase Diagram
    generation.
    """
    pass
def get_facets(qhull_data, joggle=False):
    """
    Get the simplex facets for the Convex hull.

    Args:
        qhull_data (np.ndarray): The data from which to construct the convex
            hull as a Nxd array (N being number of data points and d being the
            dimension)
        joggle (boolean): Whether to joggle the input to avoid precision
            errors.

    Returns:
        List of simplices of the Convex Hull.
    """
    # "QJ" joggles the input points; "Qt" triangulates the output instead.
    options = "QJ i" if joggle else "Qt i"
    return ConvexHull(qhull_data, qhull_options=options).simplices
class PDPlotter(object):
    """
    A plotter class for phase diagrams.

    Args:
        phasediagram: PhaseDiagram object.
        show_unstable (float): Whether unstable phases will be plotted as
            well as red crosses. If a number > 0 is entered, all phases with
            ehull < show_unstable will be shown.
        \\*\\*plotkwargs: Keyword args passed to matplotlib.pyplot.plot. Can
            be used to customize markers etc. If not set, the default is
            {
                "markerfacecolor": (0.2157, 0.4941, 0.7216),
                "markersize": 10,
                "linewidth": 3
            }
    """

    def __init__(self, phasediagram, show_unstable=0, **plotkwargs):
        # note: palettable imports matplotlib
        from palettable.colorbrewer.qualitative import Set1_3
        self._pd = phasediagram
        self._dim = len(self._pd.elements)
        if self._dim > 4:
            raise ValueError("Only 1-4 components supported!")
        # For a 1-component PD there are no hull lines; use a degenerate
        # self-line so downstream code has something to iterate over.
        self.lines = uniquelines(self._pd.facets) if self._dim > 1 else \
            [[self._pd.facets[0][0], self._pd.facets[0][0]]]
        self.show_unstable = show_unstable
        colors = Set1_3.mpl_colors
        self.plotkwargs = plotkwargs or {
            "markerfacecolor": colors[2],
            "markersize": 10,
            "linewidth": 3
        }

    @property
    def pd_plot_data(self):
        """
        Plot data for phase diagram.
        2-comp - Full hull with energies
        3/4-comp - Projection into 2D or 3D Gibbs triangle.

        Returns:
            (lines, stable_entries, unstable_entries):
            - lines is a list of list of coordinates for lines in the PD.
            - stable_entries is a {coordinate : entry} for each stable node
              in the phase diagram. (Each coordinate can only have one
              stable phase)
            - unstable_entries is a {entry: coordinates} for all unstable
              nodes in the phase diagram.
        """
        pd = self._pd
        entries = pd.qhull_entries
        data = np.array(pd.qhull_data)
        lines = []
        stable_entries = {}
        for line in self.lines:
            entry1 = entries[line[0]]
            entry2 = entries[line[1]]
            if self._dim < 3:
                # Binary PD: x is composition, y is formation energy.
                x = [data[line[0]][0], data[line[1]][0]]
                y = [pd.get_form_energy_per_atom(entry1),
                     pd.get_form_energy_per_atom(entry2)]
                coord = [x, y]
            elif self._dim == 3:
                coord = triangular_coord(data[line, 0:2])
            else:
                coord = tet_coord(data[line, 0:3])
            lines.append(coord)
            labelcoord = list(zip(*coord))
            stable_entries[labelcoord[0]] = entry1
            stable_entries[labelcoord[1]] = entry2
        all_entries = pd.all_entries
        all_data = np.array(pd.all_entries_hulldata)
        unstable_entries = dict()
        stable = pd.stable_entries
        for i in range(0, len(all_entries)):
            entry = all_entries[i]
            if entry not in stable:
                # Unstable points are passed as degenerate (duplicated)
                # coordinates through the same projection helpers.
                if self._dim < 3:
                    x = [all_data[i][0], all_data[i][0]]
                    y = [pd.get_form_energy_per_atom(entry),
                         pd.get_form_energy_per_atom(entry)]
                    coord = [x, y]
                elif self._dim == 3:
                    coord = triangular_coord([all_data[i, 0:2],
                                              all_data[i, 0:2]])
                else:
                    coord = tet_coord([all_data[i, 0:3], all_data[i, 0:3],
                                       all_data[i, 0:3]])
                labelcoord = list(zip(*coord))
                unstable_entries[entry] = labelcoord[0]
        return lines, stable_entries, unstable_entries

    def get_plot(self, label_stable=True, label_unstable=True, ordering=None,
                 energy_colormap=None, process_attributes=False):
        # Dispatch to 2D (1-3 components) or 3D (4 components) rendering;
        # __init__ guarantees self._dim <= 4.
        if self._dim < 4:
            plt = self._get_2d_plot(label_stable, label_unstable, ordering,
                                    energy_colormap,
                                    process_attributes=process_attributes)
        elif self._dim == 4:
            plt = self._get_3d_plot(label_stable)
        return plt

    def plot_element_profile(self, element, comp, show_label_index=None,
                             xlim=5):
        """
        Draw the element profile plot for a composition varying different
        chemical potential of an element.
        X value is the negative value of the chemical potential reference to
        elemental chemical potential. For example, if choose Element("Li"),
        X= -(µLi-µLi0), which corresponds to the voltage versus metal anode.
        Y values represent for the number of element uptake in this composition
        (unit: per atom). All reactions are printed to help choosing the
        profile steps you want to show label in the plot.

        Args:
            element (Element): An element of which the chemical potential is
                considered. It also must be in the phase diagram.
            comp (Composition): A composition.
            show_label_index (list of integers): The labels for reaction products
                you want to show in the plot. Default to None (not showing any
                annotation for reaction products). For the profile steps you want
                to show the labels, just add it to the show_label_index. The
                profile step counts from zero. For example, you can set
                show_label_index=[0, 2, 5] to label profile step 0,2,5.
            xlim (float): The max x value. x value is from 0 to xlim. Default to
                5 eV.

        Returns:
            Plot of element profile evolution by varying the chemical potential
            of an element.
        """
        plt = pretty_plot(12, 8)
        pd = self._pd
        evolution = pd.get_element_profile(element, comp)
        num_atoms = evolution[0]["reaction"].reactants[0].num_atoms
        element_energy = evolution[0]['chempot']
        for i, d in enumerate(evolution):
            v = -(d["chempot"] - element_energy)
            print ("index= %s, -\u0394\u03BC=%.4f(eV)," % (i, v), d["reaction"])
            if i != 0:
                # x2/y1 carry over from the previous iteration: this draws
                # the vertical step connecting consecutive plateaus.
                plt.plot([x2, x2], [y1, d["evolution"] / num_atoms],
                         'k', linewidth=2.5)
            x1 = v
            y1 = d["evolution"] / num_atoms
            if i != len(evolution) - 1:
                x2 = - (evolution[i + 1]["chempot"] - element_energy)
            else:
                # NOTE(review): the last plateau extends to a hard-coded
                # 5.0 regardless of the xlim argument — confirm intended.
                x2 = 5.0
            if show_label_index is not None and i in show_label_index:
                # Subscript the stoichiometric numbers for the annotation.
                products = [re.sub(r"(\d+)", r"$_{\1}$", p.reduced_formula)
                            for p in d["reaction"].products
                            if p.reduced_formula != element.symbol]
                plt.annotate(", ".join(products), xy=(v + 0.05, y1 + 0.05),
                             fontsize=24, color='r')
                plt.plot([x1, x2], [y1, y1], 'r', linewidth=3)
            else:
                plt.plot([x1, x2], [y1, y1], 'k', linewidth=2.5)
        plt.xlim((0, xlim))
        plt.xlabel("-$\\Delta{\\mu}$ (eV)")
        plt.ylabel("Uptake per atom")
        return plt

    def show(self, *args, **kwargs):
        """
        Draws the phase diagram using Matplotlib and show it.

        Args:
            \\*args: Passed to get_plot.
            \\*\\*kwargs: Passed to get_plot.
        """
        self.get_plot(*args, **kwargs).show()

    def _get_2d_plot(self, label_stable=True, label_unstable=True,
                     ordering=None, energy_colormap=None, vmin_mev=-60.0,
                     vmax_mev=60.0, show_colorbar=True,
                     process_attributes=False):
        """
        Shows the plot using pylab. Usually I won't do imports in methods,
        but since plotting is a fairly expensive library to load and not all
        machines have matplotlib installed, I have done it this way.
        """
        plt = pretty_plot(8, 6)
        from matplotlib.font_manager import FontProperties
        if ordering is None:
            (lines, labels, unstable) = self.pd_plot_data
        else:
            (_lines, _labels, _unstable) = self.pd_plot_data
            (lines, labels, unstable) = order_phase_diagram(
                _lines, _labels, _unstable, ordering)
        if energy_colormap is None:
            if process_attributes:
                for x, y in lines:
                    plt.plot(x, y, "k-", linewidth=3, markeredgecolor="k")
                # One should think about a clever way to have "complex"
                # attributes with complex processing options but with a clear
                # logic. At this moment, I just use the attributes to know
                # whether an entry is a new compound or an existing (from the
                # ICSD or from the MP) one.
                for x, y in labels.keys():
                    if labels[(x, y)].attribute is None or \
                            labels[(x, y)].attribute == "existing":
                        plt.plot(x, y, "ko", **self.plotkwargs)
                    else:
                        plt.plot(x, y, "k*", **self.plotkwargs)
            else:
                for x, y in lines:
                    plt.plot(x, y, "ko-", **self.plotkwargs)
        else:
            from matplotlib.colors import Normalize, LinearSegmentedColormap
            from matplotlib.cm import ScalarMappable
            for x, y in lines:
                plt.plot(x, y, "k-", markeredgecolor="k")
            vmin = vmin_mev / 1000.0
            vmax = vmax_mev / 1000.0
            if energy_colormap == 'default':
                # Two-segment colormap with a hard green/red break at 0 eV.
                mid = - vmin / (vmax - vmin)
                cmap = LinearSegmentedColormap.from_list(
                    'my_colormap', [(0.0, '#005500'), (mid, '#55FF55'),
                                    (mid, '#FFAAAA'), (1.0, '#FF0000')])
            else:
                cmap = energy_colormap
            norm = Normalize(vmin=vmin, vmax=vmax)
            _map = ScalarMappable(norm=norm, cmap=cmap)
            _energies = [self._pd.get_equilibrium_reaction_energy(entry)
                         for coord, entry in labels.items()]
            # Clamp non-negative energies to a tiny negative value so stable
            # phases stay on the green side of the colormap.
            energies = [en if en < 0.0 else -0.00000001 for en in _energies]
            vals_stable = _map.to_rgba(energies)
            ii = 0
            if process_attributes:
                for x, y in labels.keys():
                    if labels[(x, y)].attribute is None or \
                            labels[(x, y)].attribute == "existing":
                        plt.plot(x, y, "o", markerfacecolor=vals_stable[ii],
                                 markersize=12)
                    else:
                        plt.plot(x, y, "*", markerfacecolor=vals_stable[ii],
                                 markersize=18)
                    ii += 1
            else:
                for x, y in labels.keys():
                    plt.plot(x, y, "o", markerfacecolor=vals_stable[ii],
                             markersize=15)
                    ii += 1
        font = FontProperties()
        font.set_weight("bold")
        font.set_size(24)
        # Sets a nice layout depending on the type of PD. Also defines a
        # "center" for the PD, which then allows the annotations to be spread
        # out in a nice manner.
        if len(self._pd.elements) == 3:
            plt.axis("equal")
            plt.xlim((-0.1, 1.2))
            plt.ylim((-0.1, 1.0))
            plt.axis("off")
            center = (0.5, math.sqrt(3) / 6)
        else:
            all_coords = labels.keys()
            miny = min([c[1] for c in all_coords])
            ybuffer = max(abs(miny) * 0.1, 0.1)
            plt.xlim((-0.1, 1.1))
            plt.ylim((miny - ybuffer, ybuffer))
            center = (0.5, miny / 2)
            plt.xlabel("Fraction", fontsize=28, fontweight='bold')
            plt.ylabel("Formation energy (eV/fu)", fontsize=28,
                       fontweight='bold')
        for coords in sorted(labels.keys(), key=lambda x: -x[1]):
            entry = labels[coords]
            label = entry.name
            # The follow defines an offset for the annotation text emanating
            # from the center of the PD. Results in fairly nice layouts for the
            # most part.
            vec = (np.array(coords) - center)
            vec = vec / np.linalg.norm(vec) * 10 if np.linalg.norm(vec) != 0 \
                else vec
            valign = "bottom" if vec[1] > 0 else "top"
            if vec[0] < -0.01:
                halign = "right"
            elif vec[0] > 0.01:
                halign = "left"
            else:
                halign = "center"
            if label_stable:
                if process_attributes and entry.attribute == 'new':
                    plt.annotate(latexify(label), coords, xytext=vec,
                                 textcoords="offset points",
                                 horizontalalignment=halign,
                                 verticalalignment=valign,
                                 fontproperties=font,
                                 color='g')
                else:
                    plt.annotate(latexify(label), coords, xytext=vec,
                                 textcoords="offset points",
                                 horizontalalignment=halign,
                                 verticalalignment=valign,
                                 fontproperties=font)
        if self.show_unstable:
            font = FontProperties()
            font.set_size(16)
            energies_unstable = [self._pd.get_e_above_hull(entry)
                                 for entry, coord in unstable.items()]
            if energy_colormap is not None:
                energies.extend(energies_unstable)
                vals_unstable = _map.to_rgba(energies_unstable)
            ii = 0
            for entry, coords in unstable.items():
                ehull = self._pd.get_e_above_hull(entry)
                if ehull < self.show_unstable:
                    vec = (np.array(coords) - center)
                    vec = vec / np.linalg.norm(vec) * 10 \
                        if np.linalg.norm(vec) != 0 else vec
                    label = entry.name
                    if energy_colormap is None:
                        plt.plot(coords[0], coords[1], "ks", linewidth=3,
                                 markeredgecolor="k", markerfacecolor="r",
                                 markersize=8)
                    else:
                        plt.plot(coords[0], coords[1], "s", linewidth=3,
                                 markeredgecolor="k",
                                 markerfacecolor=vals_unstable[ii],
                                 markersize=8)
                    if label_unstable:
                        # NOTE(review): halign/valign here are whatever the
                        # LAST stable-label iteration left behind, not values
                        # computed from this entry's vec — looks like a
                        # latent alignment bug; confirm before changing.
                        plt.annotate(latexify(label), coords, xytext=vec,
                                     textcoords="offset points",
                                     horizontalalignment=halign, color="b",
                                     verticalalignment=valign,
                                     fontproperties=font)
                    ii += 1
        if energy_colormap is not None and show_colorbar:
            _map.set_array(energies)
            cbar = plt.colorbar(_map)
            cbar.set_label(
                'Energy [meV/at] above hull (in red)\nInverse energy ['
                'meV/at] above hull (in green)',
                rotation=-90, ha='left', va='center')
            # `ticks` is currently unused; the rescaling it supported is
            # commented out below.
            ticks = cbar.ax.get_yticklabels()
            # cbar.ax.set_yticklabels(['${v}$'.format(
            #     v=float(t.get_text().strip('$'))*1000.0) for t in ticks])
        f = plt.gcf()
        f.set_size_inches((8, 6))
        plt.subplots_adjust(left=0.09, right=0.98, top=0.98, bottom=0.07)
        return plt

    def _get_3d_plot(self, label_stable=True):
        """
        Shows the plot using pylab. Usually I won"t do imports in methods,
        but since plotting is a fairly expensive library to load and not all
        machines have matplotlib installed, I have done it this way.
        """
        import matplotlib.pyplot as plt
        import mpl_toolkits.mplot3d.axes3d as p3
        from matplotlib.font_manager import FontProperties
        fig = plt.figure()
        ax = p3.Axes3D(fig)
        font = FontProperties()
        font.set_weight("bold")
        font.set_size(20)
        (lines, labels, unstable) = self.pd_plot_data
        count = 1
        newlabels = list()
        for x, y, z in lines:
            ax.plot(x, y, z, "bo-", linewidth=3, markeredgecolor="b",
                    markerfacecolor="r", markersize=10)
        for coords in sorted(labels.keys()):
            entry = labels[coords]
            label = entry.name
            if label_stable:
                # Elemental phases are labelled directly; multi-element
                # phases get a numeric key listed in the figure legend text.
                if len(entry.composition.elements) == 1:
                    ax.text(coords[0], coords[1], coords[2], label)
                else:
                    ax.text(coords[0], coords[1], coords[2], str(count))
                    newlabels.append("{} : {}".format(count, latexify(label)))
                    count += 1
        plt.figtext(0.01, 0.01, "\n".join(newlabels))
        ax.axis("off")
        return plt

    def write_image(self, stream, image_format="svg", **kwargs):
        """
        Writes the phase diagram to an image in a stream.

        Args:
            stream:
                stream to write to. Can be a file stream or a StringIO stream.
            image_format
                format for image. Can be any of matplotlib supported formats.
                Defaults to svg for best results for vector graphics.
            \\*\\*kwargs: Pass through to get_plot functino.
        """
        plt = self.get_plot(**kwargs)
        f = plt.gcf()
        f.set_size_inches((12, 10))
        plt.savefig(stream, format=image_format)

    def plot_chempot_range_map(self, elements, referenced=True):
        """
        Plot the chemical potential range _map. Currently works only for
        3-component PDs.

        Args:
            elements: Sequence of elements to be considered as independent
                variables. E.g., if you want to show the stability ranges of
                all Li-Co-O phases wrt to uLi and uO, you will supply
                [Element("Li"), Element("O")]
            referenced: if True, gives the results with a reference being the
                energy of the elemental phase. If False, gives absolute values.
        """
        self.get_chempot_range_map_plot(elements, referenced=referenced).show()

    def get_chempot_range_map_plot(self, elements, referenced=True):
        """
        Returns a plot of the chemical potential range _map. Currently works
        only for 3-component PDs.

        Args:
            elements: Sequence of elements to be considered as independent
                variables. E.g., if you want to show the stability ranges of
                all Li-Co-O phases wrt to uLi and uO, you will supply
                [Element("Li"), Element("O")]
            referenced: if True, gives the results with a reference being the
                energy of the elemental phase. If False, gives absolute values.

        Returns:
            A matplotlib plot object.
        """
        plt = pretty_plot(12, 8)
        chempot_ranges = self._pd.get_chempot_range_map(
            elements, referenced=referenced)
        missing_lines = {}
        excluded_region = []
        for entry, lines in chempot_ranges.items():
            comp = entry.composition
            center_x = 0
            center_y = 0
            coords = []
            contain_zero = any([comp.get_atomic_fraction(el) == 0
                                for el in elements])
            is_boundary = (not contain_zero) and \
                sum([comp.get_atomic_fraction(el) for el in
                     elements]) == 1
            for line in lines:
                (x, y) = line.coords.transpose()
                plt.plot(x, y, "k-")
                for coord in line.coords:
                    if not in_coord_list(coords, coord):
                        coords.append(coord.tolist())
                        center_x += coord[0]
                        center_y += coord[1]
                if is_boundary:
                    excluded_region.extend(line.coords)
            if coords and contain_zero:
                # Phases missing one of the plotted elements need extra
                # horizontal/vertical lines; handled after the axis limits
                # are known.
                missing_lines[entry] = coords
            else:
                xy = (center_x / len(coords), center_y / len(coords))
                plt.annotate(latexify(entry.name), xy, fontsize=22)
        ax = plt.gca()
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        # Shade the forbidden chemical potential regions.
        excluded_region.append([xlim[1], ylim[1]])
        excluded_region = sorted(excluded_region, key=lambda c: c[0])
        (x, y) = np.transpose(excluded_region)
        plt.fill(x, y, "0.80")
        # The hull does not generate the missing horizontal and vertical lines.
        # The following code fixes this.
        el0 = elements[0]
        el1 = elements[1]
        for entry, coords in missing_lines.items():
            center_x = sum([c[0] for c in coords])
            center_y = sum([c[1] for c in coords])
            comp = entry.composition
            is_x = comp.get_atomic_fraction(el0) < 0.01
            is_y = comp.get_atomic_fraction(el1) < 0.01
            n = len(coords)
            if not (is_x and is_y):
                if is_x:
                    # Extend horizontally to the left axis limit from the
                    # extreme (lowest/highest y) coordinates.
                    coords = sorted(coords, key=lambda c: c[1])
                    for i in [0, -1]:
                        x = [min(xlim), coords[i][0]]
                        y = [coords[i][1], coords[i][1]]
                        plt.plot(x, y, "k")
                        center_x += min(xlim)
                        center_y += coords[i][1]
                elif is_y:
                    # Extend vertically to the bottom axis limit from the
                    # extreme (lowest/highest x) coordinates.
                    coords = sorted(coords, key=lambda c: c[0])
                    for i in [0, -1]:
                        x = [coords[i][0], coords[i][0]]
                        y = [coords[i][1], min(ylim)]
                        plt.plot(x, y, "k")
                        center_x += coords[i][0]
                        center_y += min(ylim)
                xy = (center_x / (n + 2), center_y / (n + 2))
            else:
                center_x = sum(coord[0] for coord in coords) + xlim[0]
                center_y = sum(coord[1] for coord in coords) + ylim[0]
                xy = (center_x / (n + 1), center_y / (n + 1))
            plt.annotate(latexify(entry.name), xy,
                         horizontalalignment="center",
                         verticalalignment="center", fontsize=22)
        plt.xlabel("$\\mu_{{{0}}} - \\mu_{{{0}}}^0$ (eV)"
                   .format(el0.symbol))
        plt.ylabel("$\\mu_{{{0}}} - \\mu_{{{0}}}^0$ (eV)"
                   .format(el1.symbol))
        plt.tight_layout()
        return plt

    def get_contour_pd_plot(self):
        """
        Plot a contour phase diagram plot, where phase triangles are colored
        according to degree of instability by interpolation. Currently only
        works for 3-component phase diagrams.

        Returns:
            A matplotlib plot object.
        """
        from scipy import interpolate
        from matplotlib import cm
        pd = self._pd
        entries = pd.qhull_entries
        data = np.array(pd.qhull_data)
        plt = self._get_2d_plot()
        # Project compositions into triangular coordinates and replace the
        # third column with the energy above hull for interpolation.
        data[:, 0:2] = triangular_coord(data[:, 0:2]).transpose()
        for i, e in enumerate(entries):
            data[i, 2] = self._pd.get_e_above_hull(e)
        gridsize = 0.005
        xnew = np.arange(0, 1., gridsize)
        ynew = np.arange(0, 1, gridsize)
        f = interpolate.LinearNDInterpolator(data[:, 0:2], data[:, 2])
        znew = np.zeros((len(ynew), len(xnew)))
        for (i, xval) in enumerate(xnew):
            for (j, yval) in enumerate(ynew):
                znew[j, i] = f(xval, yval)
        plt.contourf(xnew, ynew, znew, 1000, cmap=cm.autumn_r)
        plt.colorbar()
        return plt
def uniquelines(q):
    """
    Given all the facets, convert it into a set of unique lines. Specifically
    used for converting convex hull facets into line pairs of coordinates.

    Args:
        q: A 2-dim sequence, where each row represents a facet. E.g.,
            [[1,2,3],[3,6,7],...]

    Returns:
        setoflines:
            A set of tuple of lines. E.g., ((1,2), (1,3), (2,3), ....)
    """
    # Every unordered vertex pair within a facet is an edge; sorting each
    # pair makes (a, b) and (b, a) collapse to one entry in the set.
    return {tuple(sorted(pair))
            for facet in q
            for pair in itertools.combinations(facet, 2)}
def triangular_coord(coord):
    """
    Convert a 2D coordinate into a triangle-based coordinate system for a
    prettier phase diagram.

    Args:
        coordinate: coordinate used in the convex hull computation.

    Returns:
        coordinates in a triangular-based coordinate system.
    """
    # Basis vectors of an equilateral triangle with unit edge length.
    basis = np.array([[1, 0], [0.5, math.sqrt(3) / 2]])
    projected = np.dot(np.array(coord), basis)
    return projected.transpose()
def tet_coord(coord):
    """
    Convert a 3D coordinate into a tetrahedron based coordinate system for a
    prettier phase diagram.

    Args:
        coordinate: coordinate used in the convex hull computation.

    Returns:
        coordinates in a tetrahedron-based coordinate system.
    """
    # Basis vectors of a regular tetrahedron with unit edge length.
    basis = np.array([
        [1, 0, 0],
        [0.5, math.sqrt(3) / 2, 0],
        [0.5, 1.0 / 3.0 * math.sqrt(3) / 2, math.sqrt(6) / 3],
    ])
    projected = np.dot(np.array(coord), basis)
    return projected.transpose()
def order_phase_diagram(lines, stable_entries, unstable_entries, ordering):
    """
    Orders the entries (their coordinates) in a phase diagram plot according
    to the user specified ordering.
    Ordering should be given as ['Up', 'Left', 'Right'], where Up,
    Left and Right are the names of the entries in the upper, left and right
    corners of the triangle respectively.

    Args:
        lines: list of list of coordinates for lines in the PD.
        stable_entries: {coordinate : entry} for each stable node in the
            phase diagram. (Each coordinate can only have one stable phase)
        unstable_entries: {entry: coordinates} for all unstable nodes in the
            phase diagram.
        ordering: Ordering of the phase diagram, given as a list ['Up',
            'Left','Right']

    Returns:
        (newlines, newstable_entries, newunstable_entries):
            - newlines is a list of list of coordinates for lines in the PD.
            - newstable_entries is a {coordinate : entry} for each stable node
              in the phase diagram. (Each coordinate can only have one
              stable phase)
            - newunstable_entries is a {entry: coordinates} for all unstable
              nodes in the phase diagram.

    Raises:
        ValueError: if the up/left/right corner entries are not all found in
            ``ordering``.
    """
    # Find which entry currently sits in each corner of the triangle.
    yup = -1000.0
    xleft = 1000.0
    xright = -1000.0
    # Initialize so an empty stable_entries raises the ValueError below
    # instead of an opaque NameError.
    nameup = nameleft = nameright = None
    for coord in stable_entries:
        if coord[0] > xright:
            xright = coord[0]
            nameright = stable_entries[coord].name
        if coord[0] < xleft:
            xleft = coord[0]
            nameleft = stable_entries[coord].name
        if coord[1] > yup:
            yup = coord[1]
            nameup = stable_entries[coord].name
    if (nameup not in ordering) or (nameright not in ordering) or \
            (nameleft not in ordering):
        raise ValueError(
            'Error in ordering_phase_diagram : \n"{up}", "{left}" and "{'
            'right}"'
            ' should be in ordering : {ord}'.format(up=nameup, left=nameleft,
                                                    right=nameright,
                                                    ord=ordering))
    # Centroid of the unit triangle; rotations are taken about this point.
    # NOTE: the original used np.float, which was deprecated in NumPy 1.20
    # and removed in NumPy 2.0 -- plain Python floats are equivalent here.
    cc = (0.5, np.sqrt(3.0) / 6.0)
    if nameup == ordering[0]:
        if nameleft == ordering[1]:
            # The coordinates were already in the user ordering
            return lines, stable_entries, unstable_entries
        # Mirror about the vertical axis through the centroid.
        return _transform_pd(lines, stable_entries, unstable_entries,
                             lambda x, y: (1.0 - x, y))
    if nameup == ordering[1]:
        c120 = np.cos(2.0 * np.pi / 3.0)
        s120 = np.sin(2.0 * np.pi / 3.0)
        if nameleft == ordering[2]:
            # Rotate by 120 degrees about the centroid.
            return _transform_pd(
                lines, stable_entries, unstable_entries,
                lambda x, y: (c120 * (x - cc[0]) - s120 * (y - cc[1]) + cc[0],
                              s120 * (x - cc[0]) + c120 * (y - cc[1]) + cc[1]))
        # Rotation-plus-mirror that keeps the right corner (1, 0) fixed.
        return _transform_pd(
            lines, stable_entries, unstable_entries,
            lambda x, y: (-c120 * (x - 1.0) - s120 * y + 1.0,
                          -s120 * (x - 1.0) + c120 * y))
    # Remaining possibility (membership was checked above): nameup is
    # ordering[2].
    c240 = np.cos(4.0 * np.pi / 3.0)
    s240 = np.sin(4.0 * np.pi / 3.0)
    if nameleft == ordering[0]:
        # Rotate by 240 degrees about the centroid.
        return _transform_pd(
            lines, stable_entries, unstable_entries,
            lambda x, y: (c240 * (x - cc[0]) - s240 * (y - cc[1]) + cc[0],
                          s240 * (x - cc[0]) + c240 * (y - cc[1]) + cc[1]))
    # Rotation-plus-mirror that keeps the origin fixed.
    return _transform_pd(
        lines, stable_entries, unstable_entries,
        lambda x, y: (-c240 * x - s240 * y,
                      -s240 * x + c240 * y))


def _transform_pd(lines, stable_entries, unstable_entries, xform):
    """Apply the point map ``xform(x, y) -> (x', y')`` to every coordinate
    in ``lines``, ``stable_entries`` and ``unstable_entries`` and return the
    transformed (newlines, newstable_entries, newunstable_entries)."""
    newlines = []
    for x, y in lines:
        newx = np.zeros_like(x)
        newy = np.zeros_like(y)
        for ii, xx in enumerate(x):
            newx[ii], newy[ii] = xform(xx, y[ii])
        newlines.append([newx, newy])
    newstable_entries = {xform(c[0], c[1]): entry
                         for c, entry in stable_entries.items()}
    newunstable_entries = {entry: xform(c[0], c[1])
                           for entry, c in unstable_entries.items()}
    return newlines, newstable_entries, newunstable_entries
| czhengsci/pymatgen | pymatgen/analysis/phase_diagram.py | Python | mit | 84,711 | [
"pymatgen"
] | b0da6f5b1d2501a4e56a3f1371c81847aaffd334a464805e688459bf43db3356 |
# being a bit too dynamic
# pylint: disable=E1101
from __future__ import division
import warnings
import re
from collections import namedtuple
from distutils.version import LooseVersion
import numpy as np
from pandas.util._decorators import cache_readonly
import pandas.core.common as com
from pandas.core.base import PandasObject
from pandas.core.config import get_option
from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike
from pandas.core.dtypes.common import (
is_list_like,
is_integer,
is_number,
is_hashable,
is_iterator)
from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame
from pandas.core.generic import _shared_docs, _shared_doc_kwargs
from pandas.core.index import Index, MultiIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.compat import range, lrange, map, zip, string_types
import pandas.compat as compat
from pandas.io.formats.printing import pprint_thing
from pandas.util._decorators import Appender
from pandas.plotting._compat import (_mpl_ge_1_3_1,
_mpl_ge_1_5_0,
_mpl_ge_2_0_0)
from pandas.plotting._style import (plot_params,
_get_standard_colors)
from pandas.plotting._tools import (_subplots, _flatten, table,
_handle_shared_axes, _get_all_lines,
_get_xlim, _set_ticks_props,
format_date_labels)
# Optional matplotlib dependency: importing pandas.plotting._converter pulls
# in matplotlib, so an ImportError here means plotting is unavailable.
try:
    from pandas.plotting import _converter
except ImportError:
    _HAS_MPL = False
else:
    _HAS_MPL = True
    # Eagerly register pandas' date/period converters with matplotlib when
    # the user opted in via the config option.
    if get_option('plotting.matplotlib.register_converters'):
        _converter.register(explicit=True)
def _raise_if_no_mpl():
    """Raise ImportError when matplotlib could not be imported at load time.

    TODO(mpl_converter): remove once converter is explicit.
    """
    if _HAS_MPL:
        return
    raise ImportError("matplotlib is required for plotting.")
def _get_standard_kind(kind):
    """Map plot-kind aliases to their canonical name ('density' -> 'kde')."""
    if kind == 'density':
        return 'kde'
    return kind
def _gca(rc=None):
    """Return the current matplotlib Axes, created under the given rc params."""
    import matplotlib.pyplot as plt
    with plt.rc_context(rc):
        ax = plt.gca()
    return ax
def _gcf():
    """Return the current matplotlib Figure."""
    from matplotlib import pyplot
    return pyplot.gcf()
class MPLPlot(object):
    """
    Base class for assembling a pandas plot using matplotlib.

    Subclasses implement ``_make_plot`` (and override ``_kind``); the
    ``generate`` method drives the full pipeline: adjust args, compute plot
    data, set up subplots, draw, add table/legend, and adorn the axes.

    Parameters
    ----------
    data : Series or DataFrame holding the values to plot.
    """
    @property
    def _kind(self):
        """Specify kind str. Must be overridden in child class"""
        raise NotImplementedError
    # Layout/orientation defaults; subclasses override as needed.
    _layout_type = 'vertical'
    _default_rot = 0
    orientation = None
    # Keyword args popped off into attributes in __init__, with defaults.
    _pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
                       'mark_right', 'stacked']
    _attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
                      'mark_right': True, 'stacked': False}
    def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
                 sharey=False, use_index=True,
                 figsize=None, grid=None, legend=True, rot=None,
                 ax=None, fig=None, title=None, xlim=None, ylim=None,
                 xticks=None, yticks=None,
                 sort_columns=False, fontsize=None,
                 secondary_y=False, colormap=None,
                 table=False, layout=None, **kwds):
        _raise_if_no_mpl()
        _converter._WARN = False
        self.data = data
        self.by = by
        self.kind = kind
        self.sort_columns = sort_columns
        self.subplots = subplots
        if sharex is None:
            if ax is None:
                self.sharex = True
            else:
                # if we get an axis, the users should do the visibility
                # setting...
                self.sharex = False
        else:
            self.sharex = sharex
        self.sharey = sharey
        self.figsize = figsize
        self.layout = layout
        self.xticks = xticks
        self.yticks = yticks
        self.xlim = xlim
        self.ylim = ylim
        self.title = title
        self.use_index = use_index
        self.fontsize = fontsize
        if rot is not None:
            self.rot = rot
            # need to know for format_date_labels since it's rotated to 30 by
            # default
            self._rot_set = True
        else:
            self._rot_set = False
            self.rot = self._default_rot
        if grid is None:
            # Secondary-y plots default to no grid to avoid double gridlines.
            grid = False if secondary_y else self.plt.rcParams['axes.grid']
        self.grid = grid
        self.legend = legend
        self.legend_handles = []
        self.legend_labels = []
        # Pop style-related kwargs into attributes (see _pop_attributes).
        for attr in self._pop_attributes:
            value = kwds.pop(attr, self._attr_defaults.get(attr, None))
            setattr(self, attr, value)
        self.ax = ax
        self.fig = fig
        self.axes = None
        # parse errorbar input if given
        xerr = kwds.pop('xerr', None)
        yerr = kwds.pop('yerr', None)
        self.errors = {}
        for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]):
            self.errors[kw] = self._parse_errorbars(kw, err)
        if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)):
            secondary_y = [secondary_y]
        self.secondary_y = secondary_y
        # ugly TypeError if user passes matplotlib's `cmap` name.
        # Probably better to accept either.
        if 'cmap' in kwds and colormap:
            raise TypeError("Only specify one of `cmap` and `colormap`.")
        elif 'cmap' in kwds:
            self.colormap = kwds.pop('cmap')
        else:
            self.colormap = colormap
        self.table = table
        self.kwds = kwds
        self._validate_color_args()
    def _validate_color_args(self):
        """Normalize 'color'/'colors' kwargs and reject ambiguous combos."""
        if 'color' not in self.kwds and 'colors' in self.kwds:
            warnings.warn(("'colors' is being deprecated. Please use 'color'"
                           "instead of 'colors'"))
            colors = self.kwds.pop('colors')
            self.kwds['color'] = colors
        if ('color' in self.kwds and self.nseries == 1 and
                not is_list_like(self.kwds['color'])):
            # support series.plot(color='green')
            self.kwds['color'] = [self.kwds['color']]
        if ('color' in self.kwds and isinstance(self.kwds['color'], tuple) and
                self.nseries == 1 and len(self.kwds['color']) in (3, 4)):
            # support RGB and RGBA tuples in series plot
            self.kwds['color'] = [self.kwds['color']]
        if ('color' in self.kwds or 'colors' in self.kwds) and \
                self.colormap is not None:
            warnings.warn("'color' and 'colormap' cannot be used "
                          "simultaneously. Using 'color'")
        if 'color' in self.kwds and self.style is not None:
            if is_list_like(self.style):
                styles = self.style
            else:
                styles = [self.style]
            # need only a single match
            for s in styles:
                if re.match('^[a-z]+?', s) is not None:
                    raise ValueError(
                        "Cannot pass 'style' string with a color "
                        "symbol and 'color' keyword argument. Please"
                        " use one or the other or pass 'style' "
                        "without a color symbol")
    def _iter_data(self, data=None, keep_index=False, fillna=None):
        """Yield (column label, values) pairs; values keep their index only
        when ``keep_index`` is True."""
        if data is None:
            data = self.data
        if fillna is not None:
            data = data.fillna(fillna)
        # TODO: unused?
        # if self.sort_columns:
        #     columns = com._try_sort(data.columns)
        # else:
        #     columns = data.columns
        for col, values in data.iteritems():
            if keep_index is True:
                yield col, values
            else:
                yield col, values.values
    @property
    def nseries(self):
        # Number of plotted series: 1 for a Series, column count otherwise.
        if self.data.ndim == 1:
            return 1
        else:
            return self.data.shape[1]
    def draw(self):
        self.plt.draw_if_interactive()
    def generate(self):
        """Run the full plotting pipeline for this plot object."""
        self._args_adjust()
        self._compute_plot_data()
        self._setup_subplots()
        self._make_plot()
        self._add_table()
        self._make_legend()
        self._adorn_subplots()
        for ax in self.axes:
            self._post_plot_logic_common(ax, self.data)
            self._post_plot_logic(ax, self.data)
    def _args_adjust(self):
        # Hook for subclasses to tweak arguments before plotting.
        pass
    def _has_plotted_object(self, ax):
        """check whether ax has data"""
        return (len(ax.lines) != 0 or
                len(ax.artists) != 0 or
                len(ax.containers) != 0)
    def _maybe_right_yaxis(self, ax, axes_num):
        """Return the axes to draw on, creating a twinx secondary axis when
        series ``axes_num`` is marked as secondary-y."""
        if not self.on_right(axes_num):
            # secondary axes may be passed via ax kw
            return self._get_ax_layer(ax)
        if hasattr(ax, 'right_ax'):
            # if it has right_ax proparty, ``ax`` must be left axes
            return ax.right_ax
        elif hasattr(ax, 'left_ax'):
            # if it has left_ax proparty, ``ax`` must be right axes
            return ax
        else:
            # otherwise, create twin axes
            orig_ax, new_ax = ax, ax.twinx()
            # TODO: use Matplotlib public API when available
            new_ax._get_lines = orig_ax._get_lines
            new_ax._get_patches_for_fill = orig_ax._get_patches_for_fill
            orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
            if not self._has_plotted_object(orig_ax):  # no data on left y
                orig_ax.get_yaxis().set_visible(False)
            return new_ax
    def _setup_subplots(self):
        """Create (or reuse) the figure and axes, applying log scales."""
        if self.subplots:
            fig, axes = _subplots(naxes=self.nseries,
                                  sharex=self.sharex, sharey=self.sharey,
                                  figsize=self.figsize, ax=self.ax,
                                  layout=self.layout,
                                  layout_type=self._layout_type)
        else:
            if self.ax is None:
                fig = self.plt.figure(figsize=self.figsize)
                axes = fig.add_subplot(111)
            else:
                fig = self.ax.get_figure()
                if self.figsize is not None:
                    fig.set_size_inches(self.figsize)
                axes = self.ax
        axes = _flatten(axes)
        if self.logx or self.loglog:
            [a.set_xscale('log') for a in axes]
        if self.logy or self.loglog:
            [a.set_yscale('log') for a in axes]
        self.fig = fig
        self.axes = axes
    @property
    def result(self):
        """
        Return result axes
        """
        if self.subplots:
            if self.layout is not None and not is_list_like(self.ax):
                return self.axes.reshape(*self.layout)
            else:
                return self.axes
        else:
            sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
            all_sec = (is_list_like(self.secondary_y) and
                       len(self.secondary_y) == self.nseries)
            if (sec_true or all_sec):
                # if all data is plotted on secondary, return right axes
                return self._get_ax_layer(self.axes[0], primary=False)
            else:
                return self.axes[0]
    def _compute_plot_data(self):
        """Coerce self.data to a numeric-only DataFrame, raising TypeError
        when nothing plottable remains."""
        data = self.data
        if isinstance(data, ABCSeries):
            label = self.label
            if label is None and data.name is None:
                label = 'None'
            data = data.to_frame(name=label)
        # GH16953, _convert is needed as fallback, for ``Series``
        # with ``dtype == object``
        data = data._convert(datetime=True, timedelta=True)
        numeric_data = data.select_dtypes(include=[np.number,
                                                   "datetime",
                                                   "datetimetz",
                                                   "timedelta"])
        try:
            is_empty = numeric_data.empty
        except AttributeError:
            is_empty = not len(numeric_data)
        # no empty frames or series allowed
        if is_empty:
            raise TypeError('Empty {0!r}: no numeric data to '
                            'plot'.format(numeric_data.__class__.__name__))
        self.data = numeric_data
    def _make_plot(self):
        # Subclasses must implement the actual drawing.
        raise com.AbstractMethodError(self)
    def _add_table(self):
        """Draw a data table on the first axes when requested via ``table``."""
        if self.table is False:
            return
        elif self.table is True:
            data = self.data.transpose()
        else:
            data = self.table
        ax = self._get_ax(0)
        table(ax, data)
    def _post_plot_logic_common(self, ax, data):
        """Common post process for each axes"""
        def get_label(i):
            try:
                return pprint_thing(data.index[i])
            except Exception:
                return ''
        if self.orientation == 'vertical' or self.orientation is None:
            if self._need_to_set_index:
                xticklabels = [get_label(x) for x in ax.get_xticks()]
                ax.set_xticklabels(xticklabels)
            self._apply_axis_properties(ax.xaxis, rot=self.rot,
                                        fontsize=self.fontsize)
            self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
            if hasattr(ax, 'right_ax'):
                self._apply_axis_properties(ax.right_ax.yaxis,
                                            fontsize=self.fontsize)
        elif self.orientation == 'horizontal':
            if self._need_to_set_index:
                yticklabels = [get_label(y) for y in ax.get_yticks()]
                ax.set_yticklabels(yticklabels)
            self._apply_axis_properties(ax.yaxis, rot=self.rot,
                                        fontsize=self.fontsize)
            self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
            if hasattr(ax, 'right_ax'):
                self._apply_axis_properties(ax.right_ax.yaxis,
                                            fontsize=self.fontsize)
        else:  # pragma no cover
            raise ValueError
    def _post_plot_logic(self, ax, data):
        """Post process for each axes. Overridden in child classes"""
        pass
    def _adorn_subplots(self):
        """Common post process unrelated to data"""
        if len(self.axes) > 0:
            all_axes = self._get_subplots()
            nrows, ncols = self._get_axes_layout()
            _handle_shared_axes(axarr=all_axes, nplots=len(all_axes),
                                naxes=nrows * ncols, nrows=nrows,
                                ncols=ncols, sharex=self.sharex,
                                sharey=self.sharey)
        for ax in self.axes:
            if self.yticks is not None:
                ax.set_yticks(self.yticks)
            if self.xticks is not None:
                ax.set_xticks(self.xticks)
            if self.ylim is not None:
                ax.set_ylim(self.ylim)
            if self.xlim is not None:
                ax.set_xlim(self.xlim)
            ax.grid(self.grid)
        if self.title:
            if self.subplots:
                if is_list_like(self.title):
                    if len(self.title) != self.nseries:
                        msg = ('The length of `title` must equal the number '
                               'of columns if using `title` of type `list` '
                               'and `subplots=True`.\n'
                               'length of title = {}\n'
                               'number of columns = {}').format(
                            len(self.title), self.nseries)
                        raise ValueError(msg)
                    for (ax, title) in zip(self.axes, self.title):
                        ax.set_title(title)
                else:
                    self.fig.suptitle(self.title)
            else:
                if is_list_like(self.title):
                    msg = ('Using `title` of type `list` is not supported '
                           'unless `subplots=True` is passed')
                    raise ValueError(msg)
                self.axes[0].set_title(self.title)
    def _apply_axis_properties(self, axis, rot=None, fontsize=None):
        # Apply rotation/fontsize to every major and minor tick label.
        labels = axis.get_majorticklabels() + axis.get_minorticklabels()
        for label in labels:
            if rot is not None:
                label.set_rotation(rot)
            if fontsize is not None:
                label.set_fontsize(fontsize)
    @property
    def legend_title(self):
        """Legend title derived from the column index name(s), or None."""
        if not isinstance(self.data.columns, MultiIndex):
            name = self.data.columns.name
            if name is not None:
                name = pprint_thing(name)
            return name
        else:
            stringified = map(pprint_thing,
                              self.data.columns.names)
            return ','.join(stringified)
    def _add_legend_handle(self, handle, label, index=None):
        """Record a legend handle/label pair, tagging secondary-y series."""
        if label is not None:
            if self.mark_right and index is not None:
                if self.on_right(index):
                    label = label + ' (right)'
            self.legend_handles.append(handle)
            self.legend_labels.append(label)
    def _make_legend(self):
        """Assemble and draw the legend from collected handles/labels."""
        ax, leg = self._get_ax_legend(self.axes[0])
        handles = []
        labels = []
        title = ''
        if not self.subplots:
            if leg is not None:
                title = leg.get_title().get_text()
                handles = leg.legendHandles
                labels = [x.get_text() for x in leg.get_texts()]
            if self.legend:
                if self.legend == 'reverse':
                    self.legend_handles = reversed(self.legend_handles)
                    self.legend_labels = reversed(self.legend_labels)
                handles += self.legend_handles
                labels += self.legend_labels
                if self.legend_title is not None:
                    title = self.legend_title
            if len(handles) > 0:
                ax.legend(handles, labels, loc='best', title=title)
        elif self.subplots and self.legend:
            for ax in self.axes:
                if ax.get_visible():
                    ax.legend(loc='best')
    def _get_ax_legend(self, ax):
        """Return the (axes, legend) pair, preferring a twin axes' legend
        when the given axes has none."""
        leg = ax.get_legend()
        other_ax = (getattr(ax, 'left_ax', None) or
                    getattr(ax, 'right_ax', None))
        other_leg = None
        if other_ax is not None:
            other_leg = other_ax.get_legend()
        if leg is None and other_leg is not None:
            leg = other_leg
            ax = other_ax
        return ax, leg
    @cache_readonly
    def plt(self):
        # Lazily imported pyplot module, cached per instance.
        import matplotlib.pyplot as plt
        return plt
    @staticmethod
    def mpl_ge_1_3_1():
        return _mpl_ge_1_3_1()
    @staticmethod
    def mpl_ge_1_5_0():
        return _mpl_ge_1_5_0()
    # Set to True by _get_xticks when the index cannot be used directly as
    # x values; labels are then set in _post_plot_logic_common.
    _need_to_set_index = False
    def _get_xticks(self, convert_period=False):
        """Return x-axis values derived from the index (or a plain range)."""
        index = self.data.index
        is_datetype = index.inferred_type in ('datetime', 'date',
                                              'datetime64', 'time')
        if self.use_index:
            if convert_period and isinstance(index, PeriodIndex):
                self.data = self.data.reindex(index=index.sort_values())
                x = self.data.index.to_timestamp()._mpl_repr()
            elif index.is_numeric():
                """
                Matplotlib supports numeric values or datetime objects as
                xaxis values. Taking LBYL approach here, by the time
                matplotlib raises exception when using non numeric/datetime
                values for xaxis, several actions are already taken by plt.
                """
                x = index._mpl_repr()
            elif is_datetype:
                self.data = self.data[notna(self.data.index)]
                self.data = self.data.sort_index()
                x = self.data.index._mpl_repr()
            else:
                self._need_to_set_index = True
                x = lrange(len(index))
        else:
            x = lrange(len(index))
        return x
    @classmethod
    def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds):
        """Low-level draw: mask NaNs and dispatch to plot or errorbar."""
        mask = isna(y)
        if mask.any():
            y = np.ma.array(y)
            y = np.ma.masked_where(mask, y)
        if isinstance(x, Index):
            x = x._mpl_repr()
        if is_errorbar:
            if 'xerr' in kwds:
                kwds['xerr'] = np.array(kwds.get('xerr'))
            if 'yerr' in kwds:
                kwds['yerr'] = np.array(kwds.get('yerr'))
            return ax.errorbar(x, y, **kwds)
        else:
            # prevent style kwarg from going to errorbar, where it is
            # unsupported
            if style is not None:
                args = (x, y, style)
            else:
                args = (x, y)
            return ax.plot(*args, **kwds)
    def _get_index_name(self):
        """Return a printable index name (joined for MultiIndex) or None."""
        if isinstance(self.data.index, MultiIndex):
            name = self.data.index.names
            if com._any_not_none(*name):
                name = ','.join(pprint_thing(x) for x in name)
            else:
                name = None
        else:
            name = self.data.index.name
            if name is not None:
                name = pprint_thing(name)
        return name
    @classmethod
    def _get_ax_layer(cls, ax, primary=True):
        """get left (primary) or right (secondary) axes"""
        if primary:
            return getattr(ax, 'left_ax', ax)
        else:
            return getattr(ax, 'right_ax', ax)
    def _get_ax(self, i):
        # get the twinx ax if appropriate
        if self.subplots:
            ax = self.axes[i]
            ax = self._maybe_right_yaxis(ax, i)
            self.axes[i] = ax
        else:
            ax = self.axes[0]
            ax = self._maybe_right_yaxis(ax, i)
        ax.get_yaxis().set_visible(True)
        return ax
    def on_right(self, i):
        """Return True when series ``i`` should go on the secondary y-axis."""
        if isinstance(self.secondary_y, bool):
            return self.secondary_y
        if isinstance(self.secondary_y, (tuple, list, np.ndarray, Index)):
            return self.data.columns[i] in self.secondary_y
    def _apply_style_colors(self, colors, kwds, col_num, label):
        """
        Manage style and color based on column number and its label.
        Returns tuple of appropriate style and kwds which "color" may be added.
        """
        style = None
        if self.style is not None:
            if isinstance(self.style, list):
                try:
                    style = self.style[col_num]
                except IndexError:
                    pass
            elif isinstance(self.style, dict):
                style = self.style.get(label, style)
            else:
                style = self.style
        has_color = 'color' in kwds or self.colormap is not None
        nocolor_style = style is None or re.match('[a-z]+', style) is None
        if (has_color or self.subplots) and nocolor_style:
            kwds['color'] = colors[col_num % len(colors)]
        return style, kwds
    def _get_colors(self, num_colors=None, color_kwds='color'):
        """Resolve the color cycle for this plot (one color per series)."""
        if num_colors is None:
            num_colors = self.nseries
        return _get_standard_colors(num_colors=num_colors,
                                    colormap=self.colormap,
                                    color=self.kwds.get(color_kwds))
    def _parse_errorbars(self, label, err):
        """
        Look for error keyword arguments and return the actual errorbar data
        or return the error DataFrame/dict

        Error bars can be specified in several ways:
            Series: the user provides a pandas.Series object of the same
                    length as the data
            ndarray: provides a np.ndarray of the same length as the data
            DataFrame/dict: error values are paired with keys matching the
                    key in the plotted DataFrame
            str: the name of the column within the plotted DataFrame
        """
        if err is None:
            return None
        from pandas import DataFrame, Series
        def match_labels(data, e):
            e = e.reindex(data.index)
            return e
        # key-matched DataFrame
        if isinstance(err, DataFrame):
            err = match_labels(self.data, err)
        # key-matched dict
        elif isinstance(err, dict):
            pass
        # Series of error values
        elif isinstance(err, Series):
            # broadcast error series across data
            err = match_labels(self.data, err)
            err = np.atleast_2d(err)
            err = np.tile(err, (self.nseries, 1))
        # errors are a column in the dataframe
        elif isinstance(err, string_types):
            evalues = self.data[err].values
            self.data = self.data[self.data.columns.drop(err)]
            err = np.atleast_2d(evalues)
            err = np.tile(err, (self.nseries, 1))
        elif is_list_like(err):
            if is_iterator(err):
                err = np.atleast_2d(list(err))
            else:
                # raw error values
                err = np.atleast_2d(err)
            err_shape = err.shape
            # asymmetrical error bars
            if err.ndim == 3:
                if (err_shape[0] != self.nseries) or \
                        (err_shape[1] != 2) or \
                        (err_shape[2] != len(self.data)):
                    msg = "Asymmetrical error bars should be provided " + \
                        "with the shape (%u, 2, %u)" % \
                        (self.nseries, len(self.data))
                    raise ValueError(msg)
            # broadcast errors to each data series
            if len(err) == 1:
                err = np.tile(err, (self.nseries, 1))
        elif is_number(err):
            err = np.tile([err], (self.nseries, len(self.data)))
        else:
            msg = "No valid {label} detected".format(label=label)
            raise ValueError(msg)
        return err
    def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):
        """Return the xerr/yerr values applicable to one series as a dict."""
        from pandas import DataFrame
        errors = {}
        for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]):
            if flag:
                err = self.errors[kw]
                # user provided label-matched dataframe of errors
                if isinstance(err, (DataFrame, dict)):
                    if label is not None and label in err.keys():
                        err = err[label]
                    else:
                        err = None
                elif index is not None and err is not None:
                    err = err[index]
                if err is not None:
                    errors[kw] = err
        return errors
    def _get_subplots(self):
        # All Subplot axes belonging to this plot's figure.
        from matplotlib.axes import Subplot
        return [ax for ax in self.axes[0].get_figure().get_axes()
                if isinstance(ax, Subplot)]
    def _get_axes_layout(self):
        """Estimate the (nrows, ncols) layout from axes screen positions."""
        axes = self._get_subplots()
        x_set = set()
        y_set = set()
        for ax in axes:
            # check axes coordinates to estimate layout
            points = ax.get_position().get_points()
            x_set.add(points[0][0])
            y_set.add(points[0][1])
        return (len(y_set), len(x_set))
class PlanePlot(MPLPlot):
    """
    Abstract class for plotting on plane, currently scatter and hexbin.
    """
    _layout_type = 'single'
    def __init__(self, data, x, y, **kwargs):
        MPLPlot.__init__(self, data, **kwargs)
        if x is None or y is None:
            raise ValueError(self._kind + ' requires an x and y column')
        # Positional (integer) references are resolved to column labels,
        # unless the columns themselves are integers.
        if is_integer(x) and not self.data.columns.holds_integer():
            x = self.data.columns[x]
        if is_integer(y) and not self.data.columns.holds_integer():
            y = self.data.columns[y]
        # Both axes must carry at least some numeric data.
        for label, axis_name in ((x, 'x'), (y, 'y')):
            if len(self.data[label]._get_numeric_data()) == 0:
                raise ValueError(self._kind + ' requires ' + axis_name +
                                 ' column to be numeric')
        self.x = x
        self.y = y
    @property
    def nseries(self):
        # A plane plot always draws a single series.
        return 1
    def _post_plot_logic(self, ax, data):
        # Label the axes with the plotted column names.
        ax.set_ylabel(pprint_thing(self.y))
        ax.set_xlabel(pprint_thing(self.x))
class ScatterPlot(PlanePlot):
    # Scatter plot of column x vs column y, optionally colored by column c.
    _kind = 'scatter'
    def __init__(self, data, x, y, s=None, c=None, **kwargs):
        if s is None:
            # hide the matplotlib default for size, in case we want to change
            # the handling of this argument later
            s = 20
        super(ScatterPlot, self).__init__(data, x, y, s=s, **kwargs)
        # Resolve a positional (integer) color-column reference to its label.
        if is_integer(c) and not self.data.columns.holds_integer():
            c = self.data.columns[c]
        self.c = c
    def _make_plot(self):
        """Draw the scatter, plus optional colorbar and errorbars."""
        x, y, c, data = self.x, self.y, self.c, self.data
        ax = self.axes[0]
        c_is_column = is_hashable(c) and c in self.data.columns
        # plot a colorbar only if a colormap is provided or necessary
        cb = self.kwds.pop('colorbar', self.colormap or c_is_column)
        # pandas uses colormap, matplotlib uses cmap.
        cmap = self.colormap or 'Greys'
        cmap = self.plt.cm.get_cmap(cmap)
        color = self.kwds.pop("color", None)
        if c is not None and color is not None:
            raise TypeError('Specify exactly one of `c` and `color`')
        elif c is None and color is None:
            c_values = self.plt.rcParams['patch.facecolor']
        elif color is not None:
            c_values = color
        elif c_is_column:
            c_values = self.data[c].values
        else:
            c_values = c
        if self.legend and hasattr(self, 'label'):
            label = self.label
        else:
            label = None
        scatter = ax.scatter(data[x].values, data[y].values, c=c_values,
                             label=label, cmap=cmap, **self.kwds)
        if cb:
            img = ax.collections[0]
            kws = dict(ax=ax)
            # colorbar labels require matplotlib >= 1.3.1
            if self.mpl_ge_1_3_1():
                kws['label'] = c if c_is_column else ''
            self.fig.colorbar(img, **kws)
        if label is not None:
            self._add_legend_handle(scatter, label)
        else:
            self.legend = False
        errors_x = self._get_errorbars(label=x, index=0, yerr=False)
        errors_y = self._get_errorbars(label=y, index=0, xerr=False)
        if len(errors_x) > 0 or len(errors_y) > 0:
            err_kwds = dict(errors_x, **errors_y)
            # match errorbar color to the scatter points
            err_kwds['ecolor'] = scatter.get_facecolor()[0]
            ax.errorbar(data[x].values, data[y].values,
                        linestyle='none', **err_kwds)
class HexBinPlot(PlanePlot):
    """Hexagonal binning plot of column x vs y, optionally weighted by C."""
    _kind = 'hexbin'
    def __init__(self, data, x, y, C=None, **kwargs):
        super(HexBinPlot, self).__init__(data, x, y, **kwargs)
        # Resolve a positional (integer) C-column reference to its label.
        if is_integer(C) and not self.data.columns.holds_integer():
            C = self.data.columns[C]
        self.C = C
    def _make_plot(self):
        ax = self.axes[0]
        data = self.data
        # pandas uses colormap, matplotlib uses cmap.
        cmap = self.plt.cm.get_cmap(self.colormap or 'BuGn')
        cb = self.kwds.pop('colorbar', True)
        c_values = data[self.C].values if self.C is not None else None
        ax.hexbin(data[self.x].values, data[self.y].values, C=c_values,
                  cmap=cmap, **self.kwds)
        if cb:
            self.fig.colorbar(ax.collections[0], ax=ax)
    def _make_legend(self):
        # hexbin plots carry no legend
        pass
class LinePlot(MPLPlot):
_kind = 'line'
_default_rot = 0
orientation = 'vertical'
def __init__(self, data, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if self.stacked:
self.data = self.data.fillna(value=0)
self.x_compat = plot_params['x_compat']
if 'x_compat' in self.kwds:
self.x_compat = bool(self.kwds.pop('x_compat'))
def _is_ts_plot(self):
# this is slightly deceptive
return not self.x_compat and self.use_index and self._use_dynamic_x()
def _use_dynamic_x(self):
from pandas.plotting._timeseries import _use_dynamic_x
return _use_dynamic_x(self._get_ax(0), self.data)
def _make_plot(self):
if self._is_ts_plot():
from pandas.plotting._timeseries import _maybe_convert_index
data = _maybe_convert_index(self._get_ax(0), self.data)
x = data.index # dummy, not used
plotf = self._ts_plot
it = self._iter_data(data=data, keep_index=True)
else:
x = self._get_xticks(convert_period=True)
plotf = self._plot
it = self._iter_data()
stacking_id = self._get_stacking_id()
is_errorbar = com._any_not_none(*self.errors.values())
colors = self._get_colors()
for i, (label, y) in enumerate(it):
ax = self._get_ax(i)
kwds = self.kwds.copy()
style, kwds = self._apply_style_colors(colors, kwds, i, label)
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = pprint_thing(label) # .encode('utf-8')
kwds['label'] = label
newlines = plotf(ax, x, y, style=style, column_num=i,
stacking_id=stacking_id,
is_errorbar=is_errorbar,
**kwds)
self._add_legend_handle(newlines[0], label, index=i)
if not _mpl_ge_2_0_0():
lines = _get_all_lines(ax)
left, right = _get_xlim(lines)
ax.set_xlim(left, right)
@classmethod
def _plot(cls, ax, x, y, style=None, column_num=None,
stacking_id=None, **kwds):
# column_num is used to get the target column from protf in line and
# area plots
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(y))
y_values = cls._get_stacked_values(ax, stacking_id, y, kwds['label'])
lines = MPLPlot._plot(ax, x, y_values, style=style, **kwds)
cls._update_stacker(ax, stacking_id, y)
return lines
@classmethod
def _ts_plot(cls, ax, x, data, style=None, **kwds):
from pandas.plotting._timeseries import (_maybe_resample,
_decorate_axes,
format_dateaxis)
# accept x to be consistent with normal plot func,
# x is not passed to tsplot as it uses data.index as x coordinate
# column_num must be in kwds for stacking purpose
freq, data = _maybe_resample(data, ax, kwds)
# Set ax with freq info
_decorate_axes(ax, freq, kwds)
# digging deeper
if hasattr(ax, 'left_ax'):
_decorate_axes(ax.left_ax, freq, kwds)
if hasattr(ax, 'right_ax'):
_decorate_axes(ax.right_ax, freq, kwds)
ax._plot_data.append((data, cls._kind, kwds))
lines = cls._plot(ax, data.index, data.values, style=style, **kwds)
# set date formatter, locators and rescale limits
format_dateaxis(ax, ax.freq, data.index)
return lines
def _get_stacking_id(self):
if self.stacked:
return id(self.data)
else:
return None
@classmethod
def _initialize_stacker(cls, ax, stacking_id, n):
if stacking_id is None:
return
if not hasattr(ax, '_stacker_pos_prior'):
ax._stacker_pos_prior = {}
if not hasattr(ax, '_stacker_neg_prior'):
ax._stacker_neg_prior = {}
ax._stacker_pos_prior[stacking_id] = np.zeros(n)
ax._stacker_neg_prior[stacking_id] = np.zeros(n)
@classmethod
def _get_stacked_values(cls, ax, stacking_id, values, label):
if stacking_id is None:
return values
if not hasattr(ax, '_stacker_pos_prior'):
# stacker may not be initialized for subplots
cls._initialize_stacker(ax, stacking_id, len(values))
if (values >= 0).all():
return ax._stacker_pos_prior[stacking_id] + values
elif (values <= 0).all():
return ax._stacker_neg_prior[stacking_id] + values
raise ValueError('When stacked is True, each column must be either '
'all positive or negative.'
'{0} contains both positive and negative values'
.format(label))
@classmethod
def _update_stacker(cls, ax, stacking_id, values):
if stacking_id is None:
return
if (values >= 0).all():
ax._stacker_pos_prior[stacking_id] += values
elif (values <= 0).all():
ax._stacker_neg_prior[stacking_id] += values
def _post_plot_logic(self, ax, data):
    """Format date tick labels and set the x-axis label after plotting."""
    # NOTE(review): `and` binds tighter than `or`, so this reads as
    # (not dynamic_x AND all-dates AND not subplots) OR (subplots AND sharex)
    # -- presumably intentional, but worth confirming.
    condition = (not self._use_dynamic_x() and
                 data.index.is_all_dates and
                 not self.subplots or
                 (self.subplots and self.sharex))
    index_name = self._get_index_name()
    if condition:
        # irregular TS rotated 30 deg. by default
        # probably a better place to check / set this.
        if not self._rot_set:
            # only apply the default rotation when the user passed no rot
            self.rot = 30
        format_date_labels(ax, rot=self.rot)
    if index_name is not None and self.use_index:
        ax.set_xlabel(index_name)
class AreaPlot(LinePlot):
    """Area plot: a line plot with the region below filled; stacked by
    default."""
    _kind = 'area'

    def __init__(self, data, **kwargs):
        # area plots stack by default, unlike line plots
        kwargs.setdefault('stacked', True)
        # NaN would break the fill region; treat missing values as 0
        data = data.fillna(value=0)
        LinePlot.__init__(self, data, **kwargs)
        if not self.stacked:
            # use smaller alpha to distinguish overlap
            self.kwds.setdefault('alpha', 0.5)
        if self.logy or self.loglog:
            raise ValueError("Log-y scales are not supported in area plot")

    @classmethod
    def _plot(cls, ax, x, y, style=None, column_num=None,
              stacking_id=None, is_errorbar=False, **kwds):
        """Draw one column as a line plus a fill down to the stack
        baseline; returns a list of artists for the legend."""
        if column_num == 0:
            cls._initialize_stacker(ax, stacking_id, len(y))
        y_values = cls._get_stacked_values(ax, stacking_id, y, kwds['label'])
        # need to remove label, because subplots uses mpl legend as it is
        line_kwds = kwds.copy()
        if cls.mpl_ge_1_5_0():
            line_kwds.pop('label')
        lines = MPLPlot._plot(ax, x, y_values, style=style, **line_kwds)
        # get data from the line to get coordinates for fill_between
        xdata, y_values = lines[0].get_data(orig=False)
        # unable to use ``_get_stacked_values`` here to get starting point
        if stacking_id is None:
            start = np.zeros(len(y))
        elif (y >= 0).all():
            start = ax._stacker_pos_prior[stacking_id]
        elif (y <= 0).all():
            start = ax._stacker_neg_prior[stacking_id]
        else:
            # mixed-sign column: fall back to a zero baseline
            start = np.zeros(len(y))
        if 'color' not in kwds:
            # fill in the same color matplotlib picked for the line
            kwds['color'] = lines[0].get_color()
        rect = ax.fill_between(xdata, start, y_values, **kwds)
        cls._update_stacker(ax, stacking_id, y)
        # LinePlot expects list of artists
        res = [rect] if cls.mpl_ge_1_5_0() else lines
        return res

    def _add_legend_handle(self, handle, label, index=None):
        # On matplotlib < 1.5 a fill_between artist cannot appear in a
        # legend; substitute a Rectangle patch of the same color.
        if not self.mpl_ge_1_5_0():
            from matplotlib.patches import Rectangle
            # Because fill_between isn't supported in legend,
            # specifically add Rectangle handle here
            alpha = self.kwds.get('alpha', None)
            handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(),
                               alpha=alpha)
        LinePlot._add_legend_handle(self, handle, label, index=index)

    def _post_plot_logic(self, ax, data):
        LinePlot._post_plot_logic(self, ax, data)
        # pin the baseline of single-signed data to zero
        if self.ylim is None:
            if (data >= 0).all().all():
                ax.set_ylim(0, None)
            elif (data <= 0).all().all():
                ax.set_ylim(None, 0)
class BarPlot(MPLPlot):
    """Vertical bar plot (``kind='bar'``)."""
    _kind = 'bar'
    _default_rot = 90
    orientation = 'vertical'

    def __init__(self, data, **kwargs):
        # we have to treat a series differently than a
        # 1-column DataFrame w.r.t. color handling
        self._is_series = isinstance(data, ABCSeries)
        self.bar_width = kwargs.pop('width', 0.5)
        # `position` sets relative bar alignment: 0 = left edge of the
        # slot, 1 = right edge, 0.5 = centered on the tick
        pos = kwargs.pop('position', 0.5)
        kwargs.setdefault('align', 'center')
        self.tick_pos = np.arange(len(data))
        self.bottom = kwargs.pop('bottom', 0)
        self.left = kwargs.pop('left', 0)
        self.log = kwargs.pop('log', False)
        MPLPlot.__init__(self, data, **kwargs)
        # tickoffset shifts bar positions relative to the integer ticks;
        # lim_offset widens the axis limits for edge-aligned bars
        if self.stacked or self.subplots:
            self.tickoffset = self.bar_width * pos
            if kwargs['align'] == 'edge':
                self.lim_offset = self.bar_width / 2
            else:
                self.lim_offset = 0
        else:
            if kwargs['align'] == 'edge':
                # grouped bars: each series gets a slice of the slot width
                w = self.bar_width / self.nseries
                self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5
                self.lim_offset = w * 0.5
            else:
                self.tickoffset = self.bar_width * pos
                self.lim_offset = 0
        self.ax_pos = self.tick_pos - self.tickoffset

    def _args_adjust(self):
        # allow per-bar bottoms/lefts (list-like) as arrays
        if is_list_like(self.bottom):
            self.bottom = np.array(self.bottom)
        if is_list_like(self.left):
            self.left = np.array(self.left)

    @classmethod
    def _plot(cls, ax, x, y, w, start=0, log=False, **kwds):
        return ax.bar(x, y, w, bottom=start, log=log, **kwds)

    @property
    def _start_base(self):
        # bars grow upward from `bottom` (BarhPlot overrides with `left`)
        return self.bottom

    def _make_plot(self):
        import matplotlib as mpl
        colors = self._get_colors()
        ncolors = len(colors)
        # running baselines for stacked bars, split by sign
        pos_prior = neg_prior = np.zeros(len(self.data))
        K = self.nseries
        for i, (label, y) in enumerate(self._iter_data(fillna=0)):
            ax = self._get_ax(i)
            kwds = self.kwds.copy()
            if self._is_series:
                kwds['color'] = colors
            else:
                kwds['color'] = colors[i % ncolors]
            errors = self._get_errorbars(label=label, index=i)
            kwds = dict(kwds, **errors)
            label = pprint_thing(label)
            if (('yerr' in kwds) or ('xerr' in kwds)) \
                    and (kwds.get('ecolor') is None):
                # default errorbar color to the tick color
                kwds['ecolor'] = mpl.rcParams['xtick.color']
            start = 0
            if self.log and (y >= 1).all():
                # start log-scale bars at 1 so they render
                start = 1
            start = start + self._start_base
            if self.subplots:
                w = self.bar_width / 2
                rect = self._plot(ax, self.ax_pos + w, y, self.bar_width,
                                  start=start, label=label,
                                  log=self.log, **kwds)
                ax.set_title(label)
            elif self.stacked:
                # stack on the prior of the matching sign, element-wise
                mask = y > 0
                start = np.where(mask, pos_prior, neg_prior) + self._start_base
                w = self.bar_width / 2
                rect = self._plot(ax, self.ax_pos + w, y, self.bar_width,
                                  start=start, label=label,
                                  log=self.log, **kwds)
                pos_prior = pos_prior + np.where(mask, y, 0)
                neg_prior = neg_prior + np.where(mask, 0, y)
            else:
                # grouped: series i occupies the i-th slice of the slot
                w = self.bar_width / K
                rect = self._plot(ax, self.ax_pos + (i + 0.5) * w, y, w,
                                  start=start, label=label,
                                  log=self.log, **kwds)
            self._add_legend_handle(rect, label, index=i)

    def _post_plot_logic(self, ax, data):
        if self.use_index:
            str_index = [pprint_thing(key) for key in data.index]
        else:
            str_index = [pprint_thing(key) for key in range(data.shape[0])]
        name = self._get_index_name()
        # pad the categorical axis by a quarter slot on each side
        s_edge = self.ax_pos[0] - 0.25 + self.lim_offset
        e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset
        self._decorate_ticks(ax, name, str_index, s_edge, e_edge)

    def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge):
        ax.set_xlim((start_edge, end_edge))
        ax.set_xticks(self.tick_pos)
        ax.set_xticklabels(ticklabels)
        if name is not None and self.use_index:
            ax.set_xlabel(name)
class BarhPlot(BarPlot):
    """Horizontal bar plot (``kind='barh'``)."""
    _kind = 'barh'
    _default_rot = 0
    orientation = 'horizontal'

    @property
    def _start_base(self):
        # horizontal bars grow rightward from `left`
        return self.left

    @classmethod
    def _plot(cls, ax, x, y, w, start=0, log=False, **kwds):
        return ax.barh(x, y, w, left=start, log=log, **kwds)

    def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge):
        # the categorical axis is y for horizontal bars
        ax.set_ylim((start_edge, end_edge))
        ax.set_yticks(self.tick_pos)
        ax.set_yticklabels(ticklabels)
        if self.use_index and name is not None:
            ax.set_ylabel(name)
class HistPlot(LinePlot):
    """Histogram plot; reuses LinePlot's stacking machinery."""
    _kind = 'hist'

    def __init__(self, data, bins=10, bottom=0, **kwargs):
        self.bins = bins  # use mpl default
        self.bottom = bottom
        # Do not call LinePlot.__init__ which may fill nan
        MPLPlot.__init__(self, data, **kwargs)

    def _args_adjust(self):
        # turn an integer bin count into explicit, shared bin edges so
        # every column is binned identically
        if is_integer(self.bins):
            # create common bin edge
            values = (self.data._convert(datetime=True)._get_numeric_data())
            values = np.ravel(values)
            values = values[~isna(values)]
            hist, self.bins = np.histogram(
                values, bins=self.bins,
                range=self.kwds.get('range', None),
                weights=self.kwds.get('weights', None))
        if is_list_like(self.bottom):
            self.bottom = np.array(self.bottom)

    @classmethod
    def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0,
              stacking_id=None, **kwds):
        """Draw one column's histogram, lifted by the running stack."""
        if column_num == 0:
            # one baseline entry per bin
            cls._initialize_stacker(ax, stacking_id, len(bins) - 1)
        y = y[~isna(y)]
        base = np.zeros(len(bins) - 1)
        bottom = bottom + \
            cls._get_stacked_values(ax, stacking_id, base, kwds['label'])
        # ignore style
        n, bins, patches = ax.hist(y, bins=bins, bottom=bottom, **kwds)
        # accumulate this column's counts for subsequent stacked columns
        cls._update_stacker(ax, stacking_id, n)
        return patches

    def _make_plot(self):
        colors = self._get_colors()
        stacking_id = self._get_stacking_id()
        for i, (label, y) in enumerate(self._iter_data()):
            ax = self._get_ax(i)
            kwds = self.kwds.copy()
            label = pprint_thing(label)
            kwds['label'] = label
            style, kwds = self._apply_style_colors(colors, kwds, i, label)
            if style is not None:
                kwds['style'] = style
            kwds = self._make_plot_keywords(kwds, y)
            artists = self._plot(ax, y, column_num=i,
                                 stacking_id=stacking_id, **kwds)
            # first patch stands in for the whole histogram in the legend
            self._add_legend_handle(artists[0], label, index=i)

    def _make_plot_keywords(self, kwds, y):
        """merge BoxPlot/KdePlot properties to passed kwds"""
        # y is required for KdePlot
        kwds['bottom'] = self.bottom
        kwds['bins'] = self.bins
        return kwds

    def _post_plot_logic(self, ax, data):
        if self.orientation == 'horizontal':
            ax.set_xlabel('Frequency')
        else:
            ax.set_ylabel('Frequency')

    @property
    def orientation(self):
        # horizontal histograms are requested via mpl's 'orientation' kwarg
        if self.kwds.get('orientation', None) == 'horizontal':
            return 'horizontal'
        else:
            return 'vertical'
# Shared docstring template for Series.plot.kde / DataFrame.plot.kde;
# formatted with %-style placeholders (this-datatype, sibling-datatype,
# examples) by the accessor that uses it.
_kde_docstring = """
Generate Kernel Density Estimate plot using Gaussian kernels.
In statistics, `kernel density estimation`_ (KDE) is a non-parametric
way to estimate the probability density function (PDF) of a random
variable. This function uses Gaussian kernels and includes automatic
bandwidth determination.
.. _kernel density estimation:
https://en.wikipedia.org/wiki/Kernel_density_estimation
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable.
If None (default), 'scott' is used.
See :class:`scipy.stats.gaussian_kde` for more information.
ind : NumPy array or integer, optional
Evaluation points for the estimated PDF. If None (default),
1000 equally spaced points are used. If `ind` is a NumPy array, the
KDE is evaluated at the points passed. If `ind` is an integer,
`ind` number of equally spaced points are used.
**kwds : optional
Additional keyword arguments are documented in
:meth:`pandas.%(this-datatype)s.plot`.
Returns
-------
axes : matplotlib.axes.Axes or numpy.ndarray of them
See Also
--------
scipy.stats.gaussian_kde : Representation of a kernel-density
estimate using Gaussian kernels. This is the function used
internally to estimate the PDF.
%(sibling-datatype)s.plot.kde : Generate a KDE plot for a
%(sibling-datatype)s.
Examples
--------
%(examples)s
"""
class KdePlot(HistPlot):
    """Kernel density estimate plot (``kind='kde'`` / ``'density'``)."""
    _kind = 'kde'
    orientation = 'vertical'

    def __init__(self, data, bw_method=None, ind=None, **kwargs):
        # deliberately skip HistPlot/LinePlot __init__: no nan handling here
        MPLPlot.__init__(self, data, **kwargs)
        self.bw_method = bw_method
        self.ind = ind

    def _args_adjust(self):
        # no shared-bin preprocessing needed for a KDE
        pass

    def _get_ind(self, y):
        """Evaluation grid for the PDF.

        Returns ``self.ind`` when it is array-like; otherwise an evenly
        spaced grid (1000 points by default, or ``self.ind`` points when
        it is an integer) padded by half the sample range on each side.
        """
        if self.ind is None or is_integer(self.ind):
            num = 1000 if self.ind is None else self.ind
            # np.nanmin()/np.nanmax() ignore missing values
            lo = np.nanmin(y)
            hi = np.nanmax(y)
            pad = 0.5 * (hi - lo)
            return np.linspace(lo - pad, hi + pad, num)
        return self.ind

    @classmethod
    def _plot(cls, ax, y, style=None, bw_method=None, ind=None,
              column_num=None, stacking_id=None, **kwds):
        """Fit a Gaussian KDE to ``y`` and plot it over the grid ``ind``."""
        from scipy.stats import gaussian_kde
        from scipy import __version__ as spv
        sample = remove_na_arraylike(y)
        if LooseVersion(spv) >= '0.11.0':
            gkde = gaussian_kde(sample, bw_method=bw_method)
        else:
            # older scipy has no bw_method; warn if one was requested
            gkde = gaussian_kde(sample)
            if bw_method is not None:
                msg = ('bw_method was added in Scipy 0.11.0.' +
                       ' Scipy version in use is {spv}.'.format(spv=spv))
                warnings.warn(msg)
        return MPLPlot._plot(ax, ind, gkde.evaluate(ind), style=style, **kwds)

    def _make_plot_keywords(self, kwds, y):
        kwds['bw_method'] = self.bw_method
        kwds['ind'] = self._get_ind(y)
        return kwds

    def _post_plot_logic(self, ax, data):
        ax.set_ylabel('Density')
class PiePlot(MPLPlot):
    """Pie plot; each column is drawn as one pie on its own axes."""
    _kind = 'pie'
    _layout_type = 'horizontal'

    def __init__(self, data, kind=None, **kwargs):
        # pie wedges cannot represent missing or negative values
        data = data.fillna(value=0)
        if (data < 0).any().any():
            raise ValueError("{0} doesn't allow negative values".format(kind))
        MPLPlot.__init__(self, data, kind=kind, **kwargs)

    def _args_adjust(self):
        # grid lines and log scales are meaningless on a pie chart
        self.grid = False
        self.logy = False
        self.logx = False
        self.loglog = False

    def _validate_color_args(self):
        # colors are handled through the 'colors' kwarg in _make_plot
        pass

    def _make_plot(self):
        colors = self._get_colors(num_colors=len(self.data),
                                  color_kwds='colors')
        self.kwds.setdefault('colors', colors)

        def blank_labeler(label, value):
            # hide labels of zero-sized wedges so they don't overlap
            # neighbouring nonzero wedges
            return '' if value == 0 else label

        for i, (label, y) in enumerate(self._iter_data()):
            ax = self._get_ax(i)
            if label is not None:
                label = pprint_thing(label)
                ax.set_ylabel(label)

            kwds = self.kwds.copy()
            idx = [pprint_thing(v) for v in self.data.index]
            # labels is used for each wedge's labels
            labels = kwds.pop('labels', idx)
            if labels is not None:
                blabels = [blank_labeler(lbl, val)
                           for lbl, val in zip(labels, y)]
            else:
                blabels = None
            results = ax.pie(y, labels=blabels, **kwds)

            # ax.pie returns a third sequence only when autopct is given
            if kwds.get('autopct', None) is not None:
                patches, texts, autotexts = results
            else:
                patches, texts = results
                autotexts = []

            if self.fontsize is not None:
                for t in texts + autotexts:
                    t.set_fontsize(self.fontsize)

            # legend labels fall back to the index when none were passed
            leglabels = labels if labels is not None else idx
            for patch, leg in zip(patches, leglabels):
                self._add_legend_handle(patch, leg)
class BoxPlot(LinePlot):
    """Box-and-whisker plot (``kind='box'``)."""
    _kind = 'box'
    _layout_type = 'horizontal'

    _valid_return_types = (None, 'axes', 'dict', 'both')
    # namedtuple to hold results
    BP = namedtuple("Boxplot", ['ax', 'lines'])

    def __init__(self, data, return_type='axes', **kwargs):
        # Do not call LinePlot.__init__ which may fill nan
        if return_type not in self._valid_return_types:
            raise ValueError(
                "return_type must be {None, 'axes', 'dict', 'both'}")
        self.return_type = return_type
        MPLPlot.__init__(self, data, **kwargs)

    def _args_adjust(self):
        if self.subplots:
            # Disable label ax sharing. Otherwise, all subplots shows last
            # column label
            if self.orientation == 'vertical':
                self.sharex = False
            else:
                self.sharey = False

    @classmethod
    def _plot(cls, ax, y, column_num=None, return_type='axes', **kwds):
        """Draw the boxplot and package its result per ``return_type``.

        Always returns a 2-tuple ``(packaged_result, bp_dict)`` so callers
        can color the artists regardless of return_type.
        """
        if y.ndim == 2:
            y = [remove_na_arraylike(v) for v in y]
            # Boxplot fails with empty arrays, so need to add a NaN
            # if any cols are empty
            # GH 8181
            y = [v if v.size > 0 else np.array([np.nan]) for v in y]
        else:
            y = remove_na_arraylike(y)
        bp = ax.boxplot(y, **kwds)

        if return_type == 'dict':
            return bp, bp
        elif return_type == 'both':
            return cls.BP(ax=ax, lines=bp), bp
        else:
            return ax, bp

    def _validate_color_args(self):
        """Resolve ``color`` into per-artist colors, with defaults."""
        if 'color' in self.kwds:
            if self.colormap is not None:
                warnings.warn("'color' and 'colormap' cannot be used "
                              "simultaneously. Using 'color'")
            self.color = self.kwds.pop('color')

            if isinstance(self.color, dict):
                valid_keys = ['boxes', 'whiskers', 'medians', 'caps']
                for key, values in compat.iteritems(self.color):
                    if key not in valid_keys:
                        raise ValueError("color dict contains invalid "
                                         "key '{0}' "
                                         "The key must be either {1}"
                                         .format(key, valid_keys))
        else:
            self.color = None

        # get standard colors for default
        colors = _get_standard_colors(num_colors=3,
                                      colormap=self.colormap,
                                      color=None)
        # use 2 colors by default, for box/whisker and median
        # flier colors isn't needed here
        # because it can be specified by ``sym`` kw
        self._boxes_c = colors[0]
        self._whiskers_c = colors[0]
        self._medians_c = colors[2]
        self._caps_c = 'k'  # mpl default

    def _get_colors(self, num_colors=None, color_kwds='color'):
        # colors are resolved in _validate_color_args instead
        pass

    def maybe_color_bp(self, bp):
        """Apply the resolved colors to the boxplot's line artists."""
        if isinstance(self.color, dict):
            boxes = self.color.get('boxes', self._boxes_c)
            whiskers = self.color.get('whiskers', self._whiskers_c)
            medians = self.color.get('medians', self._medians_c)
            caps = self.color.get('caps', self._caps_c)
        else:
            # Other types are forwarded to matplotlib
            # If None, use default colors
            boxes = self.color or self._boxes_c
            whiskers = self.color or self._whiskers_c
            medians = self.color or self._medians_c
            caps = self.color or self._caps_c

        from matplotlib.artist import setp
        setp(bp['boxes'], color=boxes, alpha=1)
        setp(bp['whiskers'], color=whiskers, alpha=1)
        setp(bp['medians'], color=medians, alpha=1)
        setp(bp['caps'], color=caps, alpha=1)

    def _make_plot(self):
        if self.subplots:
            from pandas.core.series import Series
            # collect one packaged result per column
            self._return_obj = Series()

            for i, (label, y) in enumerate(self._iter_data()):
                ax = self._get_ax(i)
                kwds = self.kwds.copy()

                ret, bp = self._plot(ax, y, column_num=i,
                                     return_type=self.return_type, **kwds)
                self.maybe_color_bp(bp)
                self._return_obj[label] = ret

                label = [pprint_thing(label)]
                self._set_ticklabels(ax, label)
        else:
            # one axes for all columns; transpose so each row is a column
            y = self.data.values.T
            ax = self._get_ax(0)
            kwds = self.kwds.copy()

            ret, bp = self._plot(ax, y, column_num=0,
                                 return_type=self.return_type, **kwds)
            self.maybe_color_bp(bp)
            self._return_obj = ret

            labels = [l for l, _ in self._iter_data()]
            labels = [pprint_thing(l) for l in labels]
            if not self.use_index:
                labels = [pprint_thing(key) for key in range(len(labels))]
            self._set_ticklabels(ax, labels)

    def _set_ticklabels(self, ax, labels):
        # label the categorical axis, which depends on orientation
        if self.orientation == 'vertical':
            ax.set_xticklabels(labels)
        else:
            ax.set_yticklabels(labels)

    def _make_legend(self):
        # boxplots carry no legend
        pass

    def _post_plot_logic(self, ax, data):
        pass

    @property
    def orientation(self):
        # mpl's `vert` kwarg controls box orientation; default vertical
        if self.kwds.get('vert', True):
            return 'vertical'
        else:
            return 'horizontal'

    @property
    def result(self):
        # with return_type=None keep the default axes-array result
        if self.return_type is None:
            return super(BoxPlot, self).result
        else:
            return self._return_obj
# kinds supported by both dataframe and series
_common_kinds = ['line', 'bar', 'barh',
                 'kde', 'density', 'area', 'hist', 'box']
# kinds supported by dataframe
_dataframe_kinds = ['scatter', 'hexbin']
# kinds supported only by series or dataframe single column
_series_kinds = ['pie']
_all_kinds = _common_kinds + _dataframe_kinds + _series_kinds

_klasses = [LinePlot, BarPlot, BarhPlot, KdePlot, HistPlot, BoxPlot,
            ScatterPlot, HexBinPlot, AreaPlot, PiePlot]
# map each plot-kind string to its implementing class for dispatch in _plot
_plot_klass = {}
for klass in _klasses:
    _plot_klass[klass._kind] = klass
def _plot(data, x=None, y=None, subplots=False,
          ax=None, kind='line', **kwds):
    """Dispatch ``data`` to the plot class registered for ``kind``.

    Normalizes x/y column selection (label or positional), then builds
    the plot object, renders it, and returns its result (axes or a
    kind-specific container).
    """
    kind = _get_standard_kind(kind.lower().strip())
    if kind in _all_kinds:
        klass = _plot_klass[kind]
    else:
        raise ValueError("%r is not a valid plot kind" % kind)

    if kind in _dataframe_kinds:
        # scatter/hexbin need two columns, so only DataFrames qualify
        if isinstance(data, ABCDataFrame):
            plot_obj = klass(data, x=x, y=y, subplots=subplots, ax=ax,
                             kind=kind, **kwds)
        else:
            raise ValueError("plot kind %r can only be used for data frames"
                             % kind)
    elif kind in _series_kinds:
        # pie: reduce a DataFrame to one column (or require subplots)
        if isinstance(data, ABCDataFrame):
            if y is None and subplots is False:
                msg = "{0} requires either y column or 'subplots=True'"
                raise ValueError(msg.format(kind))
            elif y is not None:
                if is_integer(y) and not data.columns.holds_integer():
                    # positional y: translate to the column label
                    y = data.columns[y]
                # converted to series actually. copy to not modify
                data = data[y].copy()
                data.index.name = y
        plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
    else:
        if isinstance(data, ABCDataFrame):
            data_cols = data.columns
            if x is not None:
                if is_integer(x) and not data.columns.holds_integer():
                    x = data_cols[x]
                elif not isinstance(data[x], ABCSeries):
                    raise ValueError("x must be a label or position")
                data = data.set_index(x)
            if y is not None:
                # check if we have y as int or list of ints
                int_ylist = is_list_like(y) and all(is_integer(c) for c in y)
                int_y_arg = is_integer(y) or int_ylist
                if int_y_arg and not data.columns.holds_integer():
                    y = data_cols[y]
                label_kw = kwds['label'] if 'label' in kwds else False
                for kw in ['xerr', 'yerr']:
                    # allow error bars to be given as a column label/position
                    if (kw in kwds) and \
                        (isinstance(kwds[kw], string_types) or
                            is_integer(kwds[kw])):
                        try:
                            kwds[kw] = data[kwds[kw]]
                        except (IndexError, KeyError, TypeError):
                            # not a column reference: pass it through as-is
                            pass
                # don't overwrite
                data = data[y].copy()
                if isinstance(data, ABCSeries):
                    label_name = label_kw or y
                    data.name = label_name
                else:
                    match = is_list_like(label_kw) and len(label_kw) == len(y)
                    if label_kw and not match:
                        raise ValueError(
                            "label should be list-like and same length as y"
                        )
                    label_name = label_kw or data.columns
                    data.columns = label_name
        plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
    plot_obj.generate()
    plot_obj.draw()
    return plot_obj.result
# Docstring fragments interpolated into _shared_docs['plot'] below.
# The df_* variants are used for DataFrame.plot, the series_* variants
# (often empty) for Series.plot.
df_kind = """- 'scatter' : scatter plot
- 'hexbin' : hexbin plot"""
series_kind = ""
df_coord = """x : label or position, default None
y : label, position or list of label, positions, default None
Allows plotting of one column versus another"""
series_coord = ""
df_unique = """stacked : boolean, default False in line and
bar plots, and True in area plot. If True, create stacked plot.
sort_columns : boolean, default False
Sort column names to determine plot ordering
secondary_y : boolean or sequence, default False
Whether to plot on the secondary y-axis
If a list/tuple, which columns to plot on secondary y-axis"""
series_unique = """label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right"""
df_ax = """ax : matplotlib axes object, default None
subplots : boolean, default False
Make separate subplots for each column
sharex : boolean, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
invisible; defaults to True if ax is None otherwise False if an ax
is passed in; Be aware, that passing in both an ax and sharex=True
will alter all x axis labels for all axis in a figure!
sharey : boolean, default False
In case subplots=True, share y axis and set some y axis labels to
invisible
layout : tuple (optional)
(rows, columns) for the layout of subplots"""
series_ax = """ax : matplotlib axes object
If not passed, uses gca()"""
df_note = """- If `kind` = 'scatter' and the argument `c` is the name of a dataframe
column, the values of that column are used to color each point.
- If `kind` = 'hexbin', you can control the size of the bins with the
`gridsize` argument. By default, a histogram of the counts around each
`(x, y)` point is computed. You can specify alternative aggregations
by passing values to the `C` and `reduce_C_function` arguments.
`C` specifies the value at each `(x, y)` point and `reduce_C_function`
is a function of one argument that reduces all the values in a bin to
a single number (e.g. `mean`, `max`, `sum`, `std`)."""
series_note = ""

# Keyword bundles used to format _shared_docs['plot'] for each datatype.
_shared_doc_df_kwargs = dict(klass='DataFrame', klass_obj='df',
                             klass_kind=df_kind, klass_coord=df_coord,
                             klass_ax=df_ax, klass_unique=df_unique,
                             klass_note=df_note)
_shared_doc_series_kwargs = dict(klass='Series', klass_obj='s',
                                 klass_kind=series_kind,
                                 klass_coord=series_coord, klass_ax=series_ax,
                                 klass_unique=series_unique,
                                 klass_note=series_note)

# Master docstring template attached to plot_frame/plot_series via @Appender.
_shared_docs['plot'] = """
Make plots of %(klass)s using matplotlib / pylab.
*New in version 0.17.0:* Each plot kind has a corresponding method on the
``%(klass)s.plot`` accessor:
``%(klass_obj)s.plot(kind='line')`` is equivalent to
``%(klass_obj)s.plot.line()``.
Parameters
----------
data : %(klass)s
%(klass_coord)s
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
%(klass_kind)s
%(klass_ax)s
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string or list
Title to use for the plot. If a string is passed, print the string at
the top of the figure. If a list is passed and `subplots` is True,
print each item in the list above the corresponding subplot.
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : same types as yerr.
%(klass_unique)s
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
`**kwds` : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
%(klass_note)s
"""
@Appender(_shared_docs['plot'] % _shared_doc_df_kwargs)
def plot_frame(data, x=None, y=None, kind='line', ax=None,
               subplots=False, sharex=None, sharey=False, layout=None,
               figsize=None, use_index=True, title=None, grid=None,
               legend=True, style=None, logx=False, logy=False, loglog=False,
               xticks=None, yticks=None, xlim=None, ylim=None,
               rot=None, fontsize=None, colormap=None, table=False,
               yerr=None, xerr=None,
               secondary_y=False, sort_columns=False,
               **kwds):
    """Thin DataFrame entry point; forwards every argument to _plot."""
    return _plot(data, kind=kind, x=x, y=y, ax=ax, subplots=subplots,
                 sharex=sharex, sharey=sharey, layout=layout,
                 figsize=figsize, use_index=use_index, title=title,
                 grid=grid, legend=legend, style=style,
                 logx=logx, logy=logy, loglog=loglog,
                 xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
                 rot=rot, fontsize=fontsize, colormap=colormap,
                 table=table, yerr=yerr, xerr=xerr,
                 secondary_y=secondary_y, sort_columns=sort_columns,
                 **kwds)
@Appender(_shared_docs['plot'] % _shared_doc_series_kwargs)
def plot_series(data, kind='line', ax=None,  # Series unique
                figsize=None, use_index=True, title=None, grid=None,
                legend=False, style=None, logx=False, logy=False, loglog=False,
                xticks=None, yticks=None, xlim=None, ylim=None,
                rot=None, fontsize=None, colormap=None, table=False,
                yerr=None, xerr=None,
                label=None, secondary_y=False,  # Series unique
                **kwds):
    """Thin Series entry point; forwards every argument to _plot."""
    import matplotlib.pyplot as plt
    # if a figure is already open, draw onto its active axes so that
    # consecutive Series plots land on the same plot by default
    if ax is None and len(plt.get_fignums()) > 0:
        ax = _gca()
        ax = MPLPlot._get_ax_layer(ax)
    return _plot(data, kind=kind, ax=ax, figsize=figsize,
                 use_index=use_index, title=title, grid=grid, legend=legend,
                 style=style, logx=logx, logy=logy, loglog=loglog,
                 xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
                 rot=rot, fontsize=fontsize, colormap=colormap, table=table,
                 yerr=yerr, xerr=xerr, label=label, secondary_y=secondary_y,
                 **kwds)
_shared_docs['boxplot'] = """
Make a box plot from DataFrame columns.
Make a box-and-whisker plot from DataFrame columns, optionally grouped
by some other columns. A box plot is a method for graphically depicting
groups of numerical data through their quartiles.
The box extends from the Q1 to Q3 quartile values of the data,
with a line at the median (Q2). The whiskers extend from the edges
of box to show the range of the data. The position of the whiskers
is set by default to `1.5 * IQR (IQR = Q3 - Q1)` from the edges of the box.
Outlier points are those past the end of the whiskers.
For further details see
Wikipedia's entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`_.
Parameters
----------
column : str or list of str, optional
Column name or list of names, or vector.
Can be any valid input to :meth:`pandas.DataFrame.groupby`.
by : str or array-like, optional
Column in the DataFrame to :meth:`pandas.DataFrame.groupby`.
One box-plot will be done per value of columns in `by`.
ax : object of class matplotlib.axes.Axes, optional
The matplotlib axes to be used by boxplot.
fontsize : float or str
Tick label font size in points or as a string (e.g., `large`).
rot : int or float, default 0
The rotation angle of labels (in degrees)
with respect to the screen coordinate system.
grid : boolean, default True
Setting this to True will show the grid.
figsize : A tuple (width, height) in inches
The size of the figure to create in matplotlib.
layout : tuple (rows, columns), optional
For example, (3, 5) will display the subplots
using 3 columns and 5 rows, starting from the top-left.
return_type : {'axes', 'dict', 'both'} or None, default 'axes'
The kind of object to return. The default is ``axes``.
* 'axes' returns the matplotlib axes the boxplot is drawn on.
* 'dict' returns a dictionary whose values are the matplotlib
Lines of the boxplot.
* 'both' returns a namedtuple with the axes and dict.
* when grouping with ``by``, a Series mapping columns to
``return_type`` is returned.
If ``return_type`` is `None`, a NumPy array
of axes with the same shape as ``layout`` is returned.
**kwds
All other plotting keyword arguments to be passed to
:func:`matplotlib.pyplot.boxplot`.
Returns
-------
result :
The return type depends on the `return_type` parameter:
* 'axes' : object of class matplotlib.axes.Axes
* 'dict' : dict of matplotlib.lines.Line2D objects
* 'both' : a namedtuple with structure (ax, lines)
For data grouped with ``by``:
* :class:`~pandas.Series`
* :class:`~numpy.array` (for ``return_type = None``)
See Also
--------
Series.plot.hist: Make a histogram.
matplotlib.pyplot.boxplot : Matplotlib equivalent plot.
Notes
-----
Use ``return_type='dict'`` when you want to tweak the appearance
of the lines after plotting. In this case a dict containing the Lines
making up the boxes, caps, fliers, medians, and whiskers is returned.
Examples
--------
Boxplots can be created for every column in the dataframe
by ``df.boxplot()`` or indicating the columns to be used:
.. plot::
:context: close-figs
>>> np.random.seed(1234)
>>> df = pd.DataFrame(np.random.randn(10,4),
... columns=['Col1', 'Col2', 'Col3', 'Col4'])
>>> boxplot = df.boxplot(column=['Col1', 'Col2', 'Col3'])
Boxplots of variables distributions grouped by the values of a third
variable can be created using the option ``by``. For instance:
.. plot::
:context: close-figs
>>> df = pd.DataFrame(np.random.randn(10, 2),
... columns=['Col1', 'Col2'])
>>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',
... 'B', 'B', 'B', 'B', 'B'])
>>> boxplot = df.boxplot(by='X')
A list of strings (i.e. ``['X', 'Y']``) can be passed to boxplot
in order to group the data by combination of the variables in the x-axis:
.. plot::
:context: close-figs
>>> df = pd.DataFrame(np.random.randn(10,3),
... columns=['Col1', 'Col2', 'Col3'])
>>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',
... 'B', 'B', 'B', 'B', 'B'])
>>> df['Y'] = pd.Series(['A', 'B', 'A', 'B', 'A',
... 'B', 'A', 'B', 'A', 'B'])
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by=['X', 'Y'])
The layout of boxplot can be adjusted giving a tuple to ``layout``:
.. plot::
:context: close-figs
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
... layout=(2, 1))
Additional formatting can be done to the boxplot, like suppressing the grid
(``grid=False``), rotating the labels in the x-axis (i.e. ``rot=45``)
or changing the fontsize (i.e. ``fontsize=15``):
.. plot::
:context: close-figs
>>> boxplot = df.boxplot(grid=False, rot=45, fontsize=15)
The parameter ``return_type`` can be used to select the type of element
returned by `boxplot`. When ``return_type='axes'`` is selected,
the matplotlib axes on which the boxplot is drawn are returned:
>>> boxplot = df.boxplot(column=['Col1','Col2'], return_type='axes')
>>> type(boxplot)
<class 'matplotlib.axes._subplots.AxesSubplot'>
When grouping with ``by``, a Series mapping columns to ``return_type``
is returned:
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
... return_type='axes')
>>> type(boxplot)
<class 'pandas.core.series.Series'>
If ``return_type`` is `None`, a NumPy array of axes with the same shape
as ``layout`` is returned:
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
... return_type=None)
>>> type(boxplot)
<class 'numpy.ndarray'>
"""
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot(data, column=None, by=None, ax=None, fontsize=None,
            rot=0, grid=True, figsize=None, layout=None, return_type=None,
            **kwds):
    # Module-level boxplot engine shared by Series and DataFrame entry points.
    # validate return_type:
    if return_type not in BoxPlot._valid_return_types:
        raise ValueError("return_type must be {'axes', 'dict', 'both'}")
    from pandas import Series, DataFrame
    # Promote a Series to a one-column frame so a single code path handles both.
    if isinstance(data, Series):
        data = DataFrame({'x': data})
        column = 'x'
    def _get_colors():
        # Resolve the color cycle once; honored by maybe_color_bp below.
        return _get_standard_colors(color=kwds.get('color'), num_colors=1)
    def maybe_color_bp(bp):
        # Recolor the artists only when the caller did not pass an explicit
        # ``color`` kwarg (in that case matplotlib already applied it).
        if 'color' not in kwds:
            from matplotlib.artist import setp
            setp(bp['boxes'], color=colors[0], alpha=1)
            setp(bp['whiskers'], color=colors[0], alpha=1)
            setp(bp['medians'], color=colors[2], alpha=1)
    def plot_group(keys, values, ax):
        # Draw one boxplot (one box per key) on ``ax`` and shape the return
        # value according to the requested ``return_type``.
        keys = [pprint_thing(x) for x in keys]
        values = [np.asarray(remove_na_arraylike(v)) for v in values]
        bp = ax.boxplot(values, **kwds)
        if fontsize is not None:
            ax.tick_params(axis='both', labelsize=fontsize)
        # ``vert`` defaults to truthy: vertical boxes label the x-axis.
        if kwds.get('vert', 1):
            ax.set_xticklabels(keys, rotation=rot)
        else:
            ax.set_yticklabels(keys, rotation=rot)
        maybe_color_bp(bp)
        # Return axes in multiplot case, maybe revisit later # 985
        if return_type == 'dict':
            return bp
        elif return_type == 'both':
            return BoxPlot.BP(ax=ax, lines=bp)
        else:
            return ax
    # NOTE: the nested functions above close over ``colors``, bound here.
    colors = _get_colors()
    # Normalize ``column`` to a list of column labels (or None for "all").
    if column is None:
        columns = None
    else:
        if isinstance(column, (list, tuple)):
            columns = column
        else:
            columns = [column]
    if by is not None:
        # Prefer array return type for 2-D plots to match the subplot layout
        # https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580
        result = _grouped_plot_by_column(plot_group, data, columns=columns,
                                         by=by, grid=grid, figsize=figsize,
                                         ax=ax, layout=layout,
                                         return_type=return_type)
    else:
        # Ungrouped: everything is drawn on a single axes.
        if return_type is None:
            return_type = 'axes'
        if layout is not None:
            raise ValueError("The 'layout' keyword is not supported when "
                             "'by' is None")
        if ax is None:
            rc = {'figure.figsize': figsize} if figsize is not None else {}
            ax = _gca(rc)
        data = data._get_numeric_data()
        if columns is None:
            columns = data.columns
        else:
            data = data[columns]
        result = plot_group(columns, data.values.T, ax)
        ax.grid(grid)
    return result
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot_frame(self, column=None, by=None, ax=None, fontsize=None, rot=0,
                  grid=True, figsize=None, layout=None,
                  return_type=None, **kwds):
    # DataFrame.boxplot: a thin wrapper that forwards everything to the
    # module-level ``boxplot`` and then refreshes interactive backends.
    import matplotlib.pyplot as plt
    _converter._WARN = False  # suppress datetime-converter registration warning
    result = boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize,
                     grid=grid, rot=rot, figsize=figsize, layout=layout,
                     return_type=return_type, **kwds)
    plt.draw_if_interactive()
    return result
def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False,
                 **kwargs):
    """
    Make a scatter plot from two DataFrame columns

    Parameters
    ----------
    data : DataFrame
    x : Column name for the x-axis values
    y : Column name for the y-axis values
    by : column name or sequence, optional
        If passed, a separate scatter plot is drawn for each group
    ax : Matplotlib axis object
    figsize : A tuple (width, height) in inches
    grid : Setting this to True will show the grid
    kwargs : other plotting keyword arguments
        To be passed to scatter function

    Returns
    -------
    fig : matplotlib.Figure
    """
    import matplotlib.pyplot as plt
    kwargs.setdefault('edgecolors', 'none')
    def plot_group(group, ax):
        # Draw one group's points on the given axes.
        xvals = group[x].values
        yvals = group[y].values
        ax.scatter(xvals, yvals, **kwargs)
        ax.grid(grid)
    if by is not None:
        fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)
    else:
        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(111)
        else:
            fig = ax.get_figure()
        plot_group(data, ax)
        ax.set_ylabel(pprint_thing(y))
        ax.set_xlabel(pprint_thing(x))
    # BUG FIX: when ``by`` is given and no ``ax`` was passed, ``ax`` is still
    # None here and the previously unconditional ``ax.grid(grid)`` raised
    # AttributeError. Each per-group axes already has its grid set inside
    # ``plot_group``, so the final call is only needed for a real ``ax``.
    if ax is not None:
        ax.grid(grid)
    return fig
def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
               xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,
               sharey=False, figsize=None, layout=None, bins=10, **kwds):
    """
    Make a histogram of the DataFrame's.
    A `histogram`_ is a representation of the distribution of data.
    This function calls :meth:`matplotlib.pyplot.hist`, on each series in
    the DataFrame, resulting in one histogram per column.
    .. _histogram: https://en.wikipedia.org/wiki/Histogram
    Parameters
    ----------
    data : DataFrame
        The pandas object holding the data.
    column : string or sequence
        If passed, will be used to limit data to a subset of columns.
    by : object, optional
        If passed, then used to form histograms for separate groups.
    grid : boolean, default True
        Whether to show axis grid lines.
    xlabelsize : int, default None
        If specified changes the x-axis label size.
    xrot : float, default None
        Rotation of x axis labels. For example, a value of 90 displays the
        x labels rotated 90 degrees clockwise.
    ylabelsize : int, default None
        If specified changes the y-axis label size.
    yrot : float, default None
        Rotation of y axis labels. For example, a value of 90 displays the
        y labels rotated 90 degrees clockwise.
    ax : Matplotlib axes object, default None
        The axes to plot the histogram on.
    sharex : boolean, default True if ax is None else False
        In case subplots=True, share x axis and set some x axis labels to
        invisible; defaults to True if ax is None otherwise False if an ax
        is passed in.
        Note that passing in both an ax and sharex=True will alter all x axis
        labels for all subplots in a figure.
    sharey : boolean, default False
        In case subplots=True, share y axis and set some y axis labels to
        invisible.
    figsize : tuple
        The size in inches of the figure to create. Uses the value in
        `matplotlib.rcParams` by default.
    layout : tuple, optional
        Tuple of (rows, columns) for the layout of the histograms.
    bins : integer or sequence, default 10
        Number of histogram bins to be used. If an integer is given, bins + 1
        bin edges are calculated and returned. If bins is a sequence, gives
        bin edges, including left edge of first bin and right edge of last
        bin. In this case, bins is returned unmodified.
    **kwds
        All other plotting keyword arguments to be passed to
        :meth:`matplotlib.pyplot.hist`.
    Returns
    -------
    axes : matplotlib.AxesSubplot or numpy.ndarray of them
    See Also
    --------
    matplotlib.pyplot.hist : Plot a histogram using matplotlib.
    Examples
    --------
    .. plot::
        :context: close-figs
        This example draws a histogram based on the length and width of
        some animals, displayed in three bins
        >>> df = pd.DataFrame({
        ...     'length': [1.5, 0.5, 1.2, 0.9, 3],
        ...     'width': [0.7, 0.2, 0.15, 0.2, 1.1]
        ...     }, index= ['pig', 'rabbit', 'duck', 'chicken', 'horse'])
        >>> hist = df.hist(bins=3)
    """
    _raise_if_no_mpl()
    _converter._WARN = False
    if by is not None:
        # Grouped case: one subplot per group; delegated to grouped_hist.
        axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid,
                            figsize=figsize, sharex=sharex, sharey=sharey,
                            layout=layout, bins=bins, xlabelsize=xlabelsize,
                            xrot=xrot, ylabelsize=ylabelsize,
                            yrot=yrot, **kwds)
        return axes
    # Restrict to the requested columns, then to numeric data only.
    if column is not None:
        if not isinstance(column, (list, np.ndarray, Index)):
            column = [column]
        data = data[column]
    data = data._get_numeric_data()
    naxes = len(data.columns)
    fig, axes = _subplots(naxes=naxes, ax=ax, squeeze=False,
                          sharex=sharex, sharey=sharey, figsize=figsize,
                          layout=layout)
    _axes = _flatten(axes)
    # One histogram per (sorted) column, each on its own flattened axes slot.
    for i, col in enumerate(com._try_sort(data.columns)):
        ax = _axes[i]
        ax.hist(data[col].dropna().values, bins=bins, **kwds)
        ax.set_title(col)
        ax.grid(grid)
    _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
                     ylabelsize=ylabelsize, yrot=yrot)
    fig.subplots_adjust(wspace=0.3, hspace=0.3)
    return axes
def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None,
                xrot=None, ylabelsize=None, yrot=None, figsize=None,
                bins=10, **kwds):
    """
    Draw histogram of the input series using matplotlib
    Parameters
    ----------
    by : object, optional
        If passed, then used to form histograms for separate groups
    ax : matplotlib axis object
        If not passed, uses gca()
    grid : boolean, default True
        Whether to show axis grid lines
    xlabelsize : int, default None
        If specified changes the x-axis label size
    xrot : float, default None
        rotation of x axis labels
    ylabelsize : int, default None
        If specified changes the y-axis label size
    yrot : float, default None
        rotation of y axis labels
    figsize : tuple, default None
        figure size in inches by default
    bins : integer or sequence, default 10
        Number of histogram bins to be used. If an integer is given, bins + 1
        bin edges are calculated and returned. If bins is a sequence, gives
        bin edges, including left edge of first bin and right edge of last
        bin. In this case, bins is returned unmodified.
    `**kwds` : keywords
        To be passed to the actual plotting function
    See Also
    --------
    matplotlib.axes.Axes.hist : Plot a histogram using matplotlib.
    """
    import matplotlib.pyplot as plt
    if by is None:
        if kwds.get('layout', None) is not None:
            raise ValueError("The 'layout' keyword is not supported when "
                             "'by' is None")
        # hack until the plotting interface is a bit more unified
        fig = kwds.pop('figure', plt.gcf() if plt.get_fignums() else
                       plt.figure(figsize=figsize))
        # Resize an existing figure when an explicit figsize is requested.
        if (figsize is not None and tuple(figsize) !=
                tuple(fig.get_size_inches())):
            fig.set_size_inches(*figsize, forward=True)
        if ax is None:
            ax = fig.gca()
        elif ax.get_figure() != fig:
            raise AssertionError('passed axis not bound to passed figure')
        values = self.dropna().values
        ax.hist(values, bins=bins, **kwds)
        ax.grid(grid)
        axes = np.array([ax])
        _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
                         ylabelsize=ylabelsize, yrot=yrot)
    else:
        # Grouped case: a fresh figure is always created, so a caller-supplied
        # 'figure' kwarg cannot be honored.
        if 'figure' in kwds:
            raise ValueError("Cannot pass 'figure' when using the "
                             "'by' argument, since a new 'Figure' instance "
                             "will be created")
        axes = grouped_hist(self, by=by, ax=ax, grid=grid, figsize=figsize,
                            bins=bins, xlabelsize=xlabelsize, xrot=xrot,
                            ylabelsize=ylabelsize, yrot=yrot, **kwds)
    # Unwrap a 1-element axes array so the common case returns a bare Axes.
    if hasattr(axes, 'ndim'):
        if axes.ndim == 1 and len(axes) == 1:
            return axes[0]
    return axes
def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None,
                 layout=None, sharex=False, sharey=False, rot=90, grid=True,
                 xlabelsize=None, xrot=None, ylabelsize=None, yrot=None,
                 **kwargs):
    """
    Grouped histogram

    Draws one histogram per group of ``data`` (grouped by ``by``), each on
    its own subplot.

    Parameters
    ----------
    data: Series/DataFrame
    column: object, optional
    by: object, optional
    ax: axes, optional
    bins: int, default 50
    figsize: tuple, optional
    layout: optional
    sharex: boolean, default False
    sharey: boolean, default False
    rot: int, default 90
    grid: bool, default True
    kwargs: dict, keyword arguments passed to matplotlib.Axes.hist

    Returns
    -------
    axes: collection of Matplotlib Axes
    """
    _raise_if_no_mpl()
    _converter._WARN = False
    # x-tick rotation falls back to the generic ``rot`` when not given.
    xrot = xrot or rot

    def _hist_one_group(group, target_ax):
        # Histogram a single group's non-NA values on its subplot.
        target_ax.hist(group.dropna().values, bins=bins, **kwargs)

    fig, axes = _grouped_plot(_hist_one_group, data, column=column,
                              by=by, sharex=sharex, sharey=sharey, ax=ax,
                              figsize=figsize, layout=layout, rot=rot)
    _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
                     ylabelsize=ylabelsize, yrot=yrot)
    fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9,
                        hspace=0.5, wspace=0.3)
    return axes
def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None,
                          rot=0, grid=True, ax=None, figsize=None,
                          layout=None, sharex=False, sharey=True, **kwds):
    """
    Make box plots from DataFrameGroupBy data.
    Parameters
    ----------
    grouped : Grouped DataFrame
    subplots :
        * ``False`` - no subplots will be used
        * ``True`` - create a subplot for each group
    column : column name or list of names, or vector
        Can be any valid input to groupby
    fontsize : int or string
    rot : label rotation angle
    grid : Setting this to True will show the grid
    ax : Matplotlib axis object, default None
    figsize : A tuple (width, height) in inches
    layout : tuple (optional)
        (rows, columns) for the layout of the plot
    sharex : bool, default False
        Whether x-axes will be shared among subplots
        .. versionadded:: 0.23.1
    sharey : bool, default True
        Whether y-axes will be shared among subplots
        .. versionadded:: 0.23.1
    `**kwds` : Keyword Arguments
        All other plotting keyword arguments to be passed to
        matplotlib's boxplot function
    Returns
    -------
    dict of key/value = group key/DataFrame.boxplot return value
    or DataFrame.boxplot return value in case subplots=figures=False
    Examples
    --------
    >>> import pandas
    >>> import numpy as np
    >>> import itertools
    >>>
    >>> tuples = [t for t in itertools.product(range(1000), range(4))]
    >>> index = pandas.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])
    >>> data = np.random.randn(len(index),4)
    >>> df = pandas.DataFrame(data, columns=list('ABCD'), index=index)
    >>>
    >>> grouped = df.groupby(level='lvl1')
    >>> boxplot_frame_groupby(grouped)
    >>>
    >>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1)
    >>> boxplot_frame_groupby(grouped, subplots=False)
    """
    _raise_if_no_mpl()
    _converter._WARN = False
    if subplots is True:
        # One subplot per group; collect each group's boxplot result in a
        # Series keyed by the group label.
        naxes = len(grouped)
        fig, axes = _subplots(naxes=naxes, squeeze=False,
                              ax=ax, sharex=sharex, sharey=sharey,
                              figsize=figsize, layout=layout)
        axes = _flatten(axes)
        from pandas.core.series import Series
        ret = Series()
        for (key, group), ax in zip(grouped, axes):
            d = group.boxplot(ax=ax, column=column, fontsize=fontsize,
                              rot=rot, grid=grid, **kwds)
            ax.set_title(pprint_thing(key))
            ret.loc[key] = d
        fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1,
                            right=0.9, wspace=0.2)
    else:
        # Single-plot case: stitch the groups back into one frame, then draw
        # one combined boxplot on it.
        from pandas.core.reshape.concat import concat
        keys, frames = zip(*grouped)
        if grouped.axis == 0:
            df = concat(frames, keys=keys, axis=1)
        else:
            # Grouped along columns: joining preserves the original row index.
            if len(frames) > 1:
                df = frames[0].join(frames[1::])
            else:
                df = frames[0]
        ret = df.boxplot(column=column, fontsize=fontsize, rot=rot,
                         grid=grid, ax=ax, figsize=figsize,
                         layout=layout, **kwds)
    return ret
def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True,
                  figsize=None, sharex=True, sharey=True, layout=None,
                  rot=0, ax=None, **kwargs):
    """Apply ``plotf`` to each group of ``data`` on its own subplot.

    Returns the created figure and the (possibly 2-D) array of axes.
    """
    from pandas import DataFrame

    if figsize == 'default':
        # allowed to specify mpl default with 'default'
        warnings.warn("figsize='default' is deprecated. Specify figure"
                      "size by tuple instead", FutureWarning, stacklevel=4)
        figsize = None

    grouped = data.groupby(by)
    if column is not None:
        grouped = grouped[column]

    fig, axes = _subplots(naxes=len(grouped), figsize=figsize,
                          sharex=sharex, sharey=sharey, ax=ax,
                          layout=layout)
    flat_axes = _flatten(axes)
    # Walk groups and flattened axes in lockstep, one subplot per group.
    for target_ax, (key, group) in zip(flat_axes, grouped):
        if numeric_only and isinstance(group, DataFrame):
            group = group._get_numeric_data()
        plotf(group, target_ax, **kwargs)
        target_ax.set_title(pprint_thing(key))
    return fig, axes
def _grouped_plot_by_column(plotf, data, columns=None, by=None,
                            numeric_only=True, grid=False,
                            figsize=None, ax=None, layout=None,
                            return_type=None, **kwargs):
    """Apply ``plotf`` per column, with each column grouped by ``by``.

    One subplot per column; ``plotf`` receives the group keys, the grouped
    values and the target axes.
    """
    from pandas.core.series import Series

    grouped = data.groupby(by)
    if columns is None:
        # Default to every numeric column that is not itself a grouping key.
        if not isinstance(by, (list, tuple)):
            by = [by]
        columns = data._get_numeric_data().columns.difference(by)

    fig, axes = _subplots(naxes=len(columns), sharex=True, sharey=True,
                          figsize=figsize, ax=ax, layout=layout)
    flat_axes = _flatten(axes)

    collected = []
    for sub_ax, col in zip(flat_axes, columns):
        keys, values = zip(*grouped[col])
        collected.append(plotf(keys, values, sub_ax, **kwargs))
        sub_ax.set_title(col)
        sub_ax.set_xlabel(pprint_thing(by))
        sub_ax.grid(grid)

    result = Series(collected, index=columns)
    # Return axes in multiplot case, maybe revisit later # 985
    if return_type is None:
        result = axes

    byline = by[0] if len(by) == 1 else by
    fig.suptitle('Boxplot grouped by {byline}'.format(byline=byline))
    fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
    return result
class BasePlotMethods(PandasObject):
    """Shared base for the ``Series.plot`` / ``DataFrame.plot`` accessors.

    Stores the wrapped pandas object; subclasses implement ``__call__``.
    """

    def __init__(self, data):
        self._data = data

    def __call__(self, *args, **kwargs):
        # Subclasses must dispatch to the appropriate plotting function.
        raise NotImplementedError
class SeriesPlotMethods(BasePlotMethods):
    """Series plotting accessor and method
    Examples
    --------
    >>> s.plot.line()
    >>> s.plot.bar()
    >>> s.plot.hist()
    Plotting methods can also be accessed by calling the accessor as a method
    with the ``kind`` argument:
    ``s.plot(kind='line')`` is equivalent to ``s.plot.line()``
    """
    def __call__(self, kind='line', ax=None,
                 figsize=None, use_index=True, title=None, grid=None,
                 legend=False, style=None, logx=False, logy=False,
                 loglog=False, xticks=None, yticks=None,
                 xlim=None, ylim=None,
                 rot=None, fontsize=None, colormap=None, table=False,
                 yerr=None, xerr=None,
                 label=None, secondary_y=False, **kwds):
        # Forward every argument to the module-level plot_series dispatcher.
        return plot_series(self._data, kind=kind, ax=ax, figsize=figsize,
                           use_index=use_index, title=title, grid=grid,
                           legend=legend, style=style, logx=logx, logy=logy,
                           loglog=loglog, xticks=xticks, yticks=yticks,
                           xlim=xlim, ylim=ylim, rot=rot, fontsize=fontsize,
                           colormap=colormap, table=table, yerr=yerr,
                           xerr=xerr, label=label, secondary_y=secondary_y,
                           **kwds)
    # Expose plot_series' documentation on the accessor itself.
    __call__.__doc__ = plot_series.__doc__
    def line(self, **kwds):
        """
        Line plot
        Parameters
        ----------
        `**kwds` : optional
            Additional keyword arguments are documented in
            :meth:`pandas.Series.plot`.
        Returns
        -------
        axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
        Examples
        --------
        .. plot::
            :context: close-figs
            >>> s = pd.Series([1, 3, 2])
            >>> s.plot.line()
        """
        return self(kind='line', **kwds)
    def bar(self, **kwds):
        """
        Vertical bar plot
        Parameters
        ----------
        `**kwds` : optional
            Additional keyword arguments are documented in
            :meth:`pandas.Series.plot`.
        Returns
        -------
        axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
        """
        return self(kind='bar', **kwds)
    def barh(self, **kwds):
        """
        Horizontal bar plot
        Parameters
        ----------
        `**kwds` : optional
            Additional keyword arguments are documented in
            :meth:`pandas.Series.plot`.
        Returns
        -------
        axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
        """
        return self(kind='barh', **kwds)
    def box(self, **kwds):
        """
        Boxplot
        Parameters
        ----------
        `**kwds` : optional
            Additional keyword arguments are documented in
            :meth:`pandas.Series.plot`.
        Returns
        -------
        axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
        """
        return self(kind='box', **kwds)
    def hist(self, bins=10, **kwds):
        """
        Histogram
        Parameters
        ----------
        bins: integer, default 10
            Number of histogram bins to be used
        `**kwds` : optional
            Additional keyword arguments are documented in
            :meth:`pandas.Series.plot`.
        Returns
        -------
        axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
        """
        return self(kind='hist', bins=bins, **kwds)
    # The kde docstring is filled from a shared template so Series and
    # DataFrame stay in sync; only the examples differ.
    @Appender(_kde_docstring % {
        'this-datatype': 'Series',
        'sibling-datatype': 'DataFrame',
        'examples': """
        Given a Series of points randomly sampled from an unknown
        distribution, estimate its PDF using KDE with automatic
        bandwidth determination and plot the results, evaluating them at
        1000 equally spaced points (default):
        .. plot::
            :context: close-figs
            >>> s = pd.Series([1, 2, 2.5, 3, 3.5, 4, 5])
            >>> ax = s.plot.kde()
        A scalar bandwidth can be specified. Using a small bandwidth value can
        lead to over-fitting, while using a large bandwidth value may result
        in under-fitting:
        .. plot::
            :context: close-figs
            >>> ax = s.plot.kde(bw_method=0.3)
        .. plot::
            :context: close-figs
            >>> ax = s.plot.kde(bw_method=3)
        Finally, the `ind` parameter determines the evaluation points for the
        plot of the estimated PDF:
        .. plot::
            :context: close-figs
            >>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5])
        """.strip()
    })
    def kde(self, bw_method=None, ind=None, **kwds):
        return self(kind='kde', bw_method=bw_method, ind=ind, **kwds)
    # ``.plot.density`` is an alias for ``.plot.kde``.
    density = kde
    def area(self, **kwds):
        """
        Area plot
        Parameters
        ----------
        `**kwds` : optional
            Additional keyword arguments are documented in
            :meth:`pandas.Series.plot`.
        Returns
        -------
        axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
        """
        return self(kind='area', **kwds)
    def pie(self, **kwds):
        """
        Pie chart
        Parameters
        ----------
        `**kwds` : optional
            Additional keyword arguments are documented in
            :meth:`pandas.Series.plot`.
        Returns
        -------
        axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
        """
        return self(kind='pie', **kwds)
class FramePlotMethods(BasePlotMethods):
"""DataFrame plotting accessor and method
Examples
--------
>>> df.plot.line()
>>> df.plot.scatter('x', 'y')
>>> df.plot.hexbin()
These plotting methods can also be accessed by calling the accessor as a
method with the ``kind`` argument:
``df.plot(kind='line')`` is equivalent to ``df.plot.line()``
"""
def __call__(self, x=None, y=None, kind='line', ax=None,
subplots=False, sharex=None, sharey=False, layout=None,
figsize=None, use_index=True, title=None, grid=None,
legend=True, style=None, logx=False, logy=False, loglog=False,
xticks=None, yticks=None, xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
secondary_y=False, sort_columns=False, **kwds):
return plot_frame(self._data, kind=kind, x=x, y=y, ax=ax,
subplots=subplots, sharex=sharex, sharey=sharey,
layout=layout, figsize=figsize, use_index=use_index,
title=title, grid=grid, legend=legend, style=style,
logx=logx, logy=logy, loglog=loglog, xticks=xticks,
yticks=yticks, xlim=xlim, ylim=ylim, rot=rot,
fontsize=fontsize, colormap=colormap, table=table,
yerr=yerr, xerr=xerr, secondary_y=secondary_y,
sort_columns=sort_columns, **kwds)
__call__.__doc__ = plot_frame.__doc__
def line(self, x=None, y=None, **kwds):
"""
Plot DataFrame columns as lines.
This function is useful to plot lines using DataFrame's values
as coordinates.
Parameters
----------
x : int or str, optional
Columns to use for the horizontal axis.
Either the location or the label of the columns to be used.
By default, it will use the DataFrame indices.
y : int, str, or list of them, optional
The values to be plotted.
Either the location or the label of the columns to be used.
By default, it will use the remaining DataFrame numeric columns.
**kwds
Keyword arguments to pass on to :meth:`pandas.DataFrame.plot`.
Returns
-------
axes : :class:`matplotlib.axes.Axes` or :class:`numpy.ndarray`
Returns an ndarray when ``subplots=True``.
See Also
--------
matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.
Examples
--------
.. plot::
:context: close-figs
The following example shows the populations for some animals
over the years.
>>> df = pd.DataFrame({
... 'pig': [20, 18, 489, 675, 1776],
... 'horse': [4, 25, 281, 600, 1900]
... }, index=[1990, 1997, 2003, 2009, 2014])
>>> lines = df.plot.line()
.. plot::
:context: close-figs
An example with subplots, so an array of axes is returned.
>>> axes = df.plot.line(subplots=True)
>>> type(axes)
<class 'numpy.ndarray'>
.. plot::
:context: close-figs
The following example shows the relationship between both
populations.
>>> lines = df.plot.line(x='pig', y='horse')
"""
return self(kind='line', x=x, y=y, **kwds)
def bar(self, x=None, y=None, **kwds):
"""
Vertical bar plot.
A bar plot is a plot that presents categorical data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another. If not specified,
the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
**kwds
Additional keyword arguments are documented in
:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.axes.Axes or np.ndarray of them
An ndarray is returned with one :class:`matplotlib.axes.Axes`
per column when ``subplots=True``.
See Also
--------
pandas.DataFrame.plot.barh : Horizontal bar plot.
pandas.DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.bar : Make a bar plot with matplotlib.
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
>>> ax = df.plot.bar(x='lab', y='val', rot=0)
Plot a whole dataframe to a bar plot. Each column is assigned a
distinct color, and each row is nested in a group along the
horizontal axis.
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.bar(rot=0)
Instead of nesting, the figure can be split by column with
``subplots=True``. In this case, a :class:`numpy.ndarray` of
:class:`matplotlib.axes.Axes` are returned.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(rot=0, subplots=True)
>>> axes[1].legend(loc=2) # doctest: +SKIP
Plot a single column.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(y='speed', rot=0)
Plot only selected categories for the DataFrame.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(x='lifespan', rot=0)
"""
return self(kind='bar', x=x, y=y, **kwds)
def barh(self, x=None, y=None, **kwds):
"""
Make a horizontal bar plot.
A horizontal bar plot is a plot that presents quantitative data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, default DataFrame.index
Column to be used for categories.
y : label or position, default All numeric columns in dataframe
Columns to be plotted from the DataFrame.
**kwds
Keyword arguments to pass on to :meth:`pandas.DataFrame.plot`.
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them.
See Also
--------
pandas.DataFrame.plot.bar: Vertical bar plot.
pandas.DataFrame.plot : Make plots of DataFrame using matplotlib.
matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.
Examples
--------
Basic example
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
>>> ax = df.plot.barh(x='lab', y='val')
Plot a whole DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh()
Plot a column of the DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(y='speed')
Plot DataFrame versus the desired column
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(x='lifespan')
"""
return self(kind='barh', x=x, y=y, **kwds)
def box(self, by=None, **kwds):
r"""
Make a box plot of the DataFrame columns.
A box plot is a method for graphically depicting groups of numerical
data through their quartiles.
The box extends from the Q1 to Q3 quartile values of the data,
with a line at the median (Q2). The whiskers extend from the edges
of box to show the range of the data. The position of the whiskers
is set by default to 1.5*IQR (IQR = Q3 - Q1) from the edges of the
box. Outlier points are those past the end of the whiskers.
For further details see Wikipedia's
entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`__.
A consideration when using this chart is that the box and the whiskers
can overlap, which is very common when plotting small sets of data.
Parameters
----------
by : string or sequence
Column in the DataFrame to group by.
**kwds : optional
Additional keywords are documented in
:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
pandas.DataFrame.boxplot: Another method to draw a box plot.
pandas.Series.plot.box: Draw a box plot from a Series object.
matplotlib.pyplot.boxplot: Draw a box plot in matplotlib.
Examples
--------
Draw a box plot from a DataFrame with four columns of randomly
generated data.
.. plot::
:context: close-figs
>>> data = np.random.randn(25, 4)
>>> df = pd.DataFrame(data, columns=list('ABCD'))
>>> ax = df.plot.box()
"""
return self(kind='box', by=by, **kwds)
def hist(self, by=None, bins=10, **kwds):
"""
Draw one histogram of the DataFrame's columns.
A histogram is a representation of the distribution of data.
This function groups the values of all given Series in the DataFrame
into bins and draws all bins in one :class:`matplotlib.axes.Axes`.
This is useful when the DataFrame's Series are in a similar scale.
Parameters
----------
by : str or sequence, optional
Column in the DataFrame to group by.
bins : int, default 10
Number of histogram bins to be used.
**kwds
Additional keyword arguments are documented in
:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot histogram.
See Also
--------
DataFrame.hist : Draw histograms per DataFrame's Series.
Series.hist : Draw a histogram with Series' data.
Examples
--------
When we draw a dice 6000 times, we expect to get each value around 1000
times. But when we draw two dices and sum the result, the distribution
is going to be quite different. A histogram illustrates those
distributions.
.. plot::
:context: close-figs
>>> df = pd.DataFrame(
... np.random.randint(1, 7, 6000),
... columns = ['one'])
>>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
>>> ax = df.plot.hist(bins=12, alpha=0.5)
"""
return self(kind='hist', by=by, bins=bins, **kwds)
@Appender(_kde_docstring % {
'this-datatype': 'DataFrame',
'sibling-datatype': 'Series',
'examples': """
Given several Series of points randomly sampled from unknown
distributions, estimate their PDFs using KDE with automatic
bandwidth determination and plot the results, evaluating them at
1000 equally spaced points (default):
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'x': [1, 2, 2.5, 3, 3.5, 4, 5],
... 'y': [4, 4, 4.5, 5, 5.5, 6, 6],
... })
>>> ax = df.plot.kde()
A scalar bandwidth can be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=3)
Finally, the `ind` parameter determines the evaluation points for the
plot of the estimated PDF:
.. plot::
:context: close-figs
>>> ax = df.plot.kde(ind=[1, 2, 3, 4, 5, 6])
""".strip()
})
def kde(self, bw_method=None, ind=None, **kwds):
return self(kind='kde', bw_method=bw_method, ind=ind, **kwds)
density = kde
def area(self, x=None, y=None, **kwds):
"""
Area plot
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
`**kwds` : optional
Additional keyword arguments are documented in
:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
"""
return self(kind='area', x=x, y=y, **kwds)
def pie(self, y=None, **kwds):
"""
Generate a pie plot.
A pie plot is a proportional representation of the numerical data in a
column. This function wraps :meth:`matplotlib.pyplot.pie` for the
specified column. If no column reference is passed and
``subplots=True`` a pie plot is drawn for each numerical column
independently.
Parameters
----------
y : int or label, optional
Label or position of the column to plot.
If not provided, ``subplots=True`` argument must be passed.
**kwds
Keyword arguments to pass on to :meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.axes.Axes or np.ndarray of them.
A NumPy array is returned when `subplots` is True.
See Also
--------
Series.plot.pie : Generate a pie plot for a Series.
DataFrame.plot : Make plots of a DataFrame.
Examples
--------
In the example below we have a DataFrame with the information about
planet's mass and radius. We pass the the 'mass' column to the
pie function to get a pie plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'mass': [0.330, 4.87 , 5.97],
... 'radius': [2439.7, 6051.8, 6378.1]},
... index=['Mercury', 'Venus', 'Earth'])
>>> plot = df.plot.pie(y='mass', figsize=(5, 5))
.. plot::
:context: close-figs
>>> plot = df.plot.pie(subplots=True, figsize=(6, 3))
"""
return self(kind='pie', y=y, **kwds)
def scatter(self, x, y, s=None, c=None, **kwds):
    """
    Create a scatter plot with varying marker point size and color.

    The coordinates of each point are defined by two dataframe columns and
    filled circles are used to represent each point. This kind of plot is
    useful to see complex correlations between two variables. Points could
    be for instance natural 2D coordinates like longitude and latitude in
    a map or, in general, any pair of metrics that can be plotted against
    each other.

    Parameters
    ----------
    x : int or str
        The column name or column position to be used as horizontal
        coordinates for each point.
    y : int or str
        The column name or column position to be used as vertical
        coordinates for each point.
    s : scalar or array_like, optional
        The size of each point. Possible values are:

        - A single scalar so all points have the same size.

        - A sequence of scalars, which will be used for each point's size
          recursively. For instance, when passing [2,14] all points size
          will be either 2 or 14, alternatively.

    c : str, int or array_like, optional
        The color of each point. Possible values are:

        - A single color string referred to by name, RGB or RGBA code,
          for instance 'red' or '#a98d19'.

        - A sequence of color strings referred to by name, RGB or RGBA
          code, which will be used for each point's color recursively. For
          instance ['green','yellow'] all points will be filled in green or
          yellow, alternatively.

        - A column name or position whose values will be used to color the
          marker points according to a colormap.

    **kwds
        Keyword arguments to pass on to :meth:`pandas.DataFrame.plot`.

    Returns
    -------
    axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them

    See Also
    --------
    matplotlib.pyplot.scatter : scatter plot using multiple input data
        formats.

    Examples
    --------
    Let's see how to draw a scatter plot using coordinates from the values
    in a DataFrame's columns.

    .. plot::
        :context: close-figs

        >>> df = pd.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1],
        ...                    [6.4, 3.2, 1], [5.9, 3.0, 2]],
        ...                   columns=['length', 'width', 'species'])
        >>> ax1 = df.plot.scatter(x='length',
        ...                       y='width',
        ...                       c='DarkBlue')

    And now with the color determined by a column as well.

    .. plot::
        :context: close-figs

        >>> ax2 = df.plot.scatter(x='length',
        ...                       y='width',
        ...                       c='species',
        ...                       colormap='viridis')
    """
    # Delegate to the plot accessor's __call__ with kind='scatter'.
    return self(kind='scatter', x=x, y=y, c=c, s=s, **kwds)
def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None,
           **kwds):
    """
    Generate a hexagonal binning plot.

    Generate a hexagonal binning plot of `x` versus `y`. If `C` is `None`
    (the default), this is a histogram of the number of occurrences
    of the observations at ``(x[i], y[i])``.

    If `C` is specified, specifies values at given coordinates
    ``(x[i], y[i])``. These values are accumulated for each hexagonal
    bin and then reduced according to `reduce_C_function`,
    having as default the NumPy's mean function (:meth:`numpy.mean`).
    (If `C` is specified, it must also be a 1-D sequence
    of the same length as `x` and `y`, or a column label.)

    Parameters
    ----------
    x : int or str
        The column label or position for x points.
    y : int or str
        The column label or position for y points.
    C : int or str, optional
        The column label or position for the value of `(x, y)` point.
    reduce_C_function : callable, default `np.mean`
        Function of one argument that reduces all the values in a bin to
        a single number (e.g. `np.mean`, `np.max`, `np.sum`, `np.std`).
    gridsize : int or tuple of (int, int), default 100
        The number of hexagons in the x-direction.
        The corresponding number of hexagons in the y-direction is
        chosen in a way that the hexagons are approximately regular.
        Alternatively, gridsize can be a tuple with two elements
        specifying the number of hexagons in the x-direction and the
        y-direction.
    **kwds
        Additional keyword arguments are documented in
        :meth:`pandas.DataFrame.plot`.

    Returns
    -------
    matplotlib.AxesSubplot
        The matplotlib ``Axes`` on which the hexbin is plotted.

    See Also
    --------
    DataFrame.plot : Make plots of a DataFrame.
    matplotlib.pyplot.hexbin : hexagonal binning plot using matplotlib,
        the matplotlib function that is used under the hood.

    Examples
    --------
    The following examples are generated with random data from
    a normal distribution.

    .. plot::
        :context: close-figs

        >>> n = 10000
        >>> df = pd.DataFrame({'x': np.random.randn(n),
        ...                    'y': np.random.randn(n)})
        >>> ax = df.plot.hexbin(x='x', y='y', gridsize=20)

    The next example uses `C` and `np.sum` as `reduce_C_function`.
    Note that `'observations'` values ranges from 1 to 5 but the result
    plot shows values up to more than 25. This is because of the
    `reduce_C_function`.

    .. plot::
        :context: close-figs

        >>> n = 500
        >>> df = pd.DataFrame({
        ...     'coord_x': np.random.uniform(-3, 3, size=n),
        ...     'coord_y': np.random.uniform(30, 50, size=n),
        ...     'observations': np.random.randint(1,5, size=n)
        ...     })
        >>> ax = df.plot.hexbin(x='coord_x',
        ...                     y='coord_y',
        ...                     C='observations',
        ...                     reduce_C_function=np.sum,
        ...                     gridsize=10,
        ...                     cmap="viridis")
    """
    # Forward these only when explicitly given, so matplotlib's own
    # defaults apply otherwise.
    if reduce_C_function is not None:
        kwds['reduce_C_function'] = reduce_C_function
    if gridsize is not None:
        kwds['gridsize'] = gridsize
    return self(kind='hexbin', x=x, y=y, C=C, **kwds)
| louispotok/pandas | pandas/plotting/_core.py | Python | bsd-3-clause | 126,383 | [
"Gaussian"
] | 860c06b590c82d3f78466b18f361c799182264ec2ed9f05c48eb8c3553dd10ed |
"""
revlog.py - storage back-end for mercurial
This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches
Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
"""
from node import bin, hex, nullid, nullrev, short
from i18n import _
import changegroup, errno, ancestor, mdiff
import sha, struct, util, zlib
# Module-level aliases for hot struct/zlib/sha entry points (avoids
# repeated attribute lookups in tight loops).
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = sha.new

# revlog flags
REVLOGV0 = 0  # original index format
REVLOGNG = 1  # current ("ng") index format with 64-bit offsets
REVLOGNGINLINEDATA = (1 << 16)  # flag: revision data interleaved in the index file
REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
REVLOG_DEFAULT_FORMAT = REVLOGNG
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
class RevlogError(Exception):
    # base class for all revlog-level failures
    pass

class LookupError(RevlogError):
    # Raised when a node cannot be found in the index.
    # NOTE(review): this intentionally shadows the builtin LookupError
    # inside this module (historical Mercurial API).
    def __init__(self, name, index, message):
        self.name = name
        if isinstance(name, str) and len(name) == 20:
            # binary nodeid: abbreviate to the short hex form for display
            name = short(name)
        RevlogError.__init__(self, _('%s@%s: %s') % (index, name, message))
def getoffset(q):
    """Return the data-file offset packed into the upper bits of *q*."""
    shifted = q >> 16
    return int(shifted)
def gettype(q):
    """Return the type/flags value stored in the low 16 bits of *q*."""
    low_bits = q & 0xFFFF
    return int(low_bits)
def offset_type(offset, type):
    # Pack (offset, 16-bit type) into a single index field; long() keeps
    # the value unbounded on Python 2 even for very large file offsets.
    return long(long(offset) << 16 | type)
def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # hash the parents in canonical (sorted) order so swapping p1/p2
    # yields the same nodeid
    first, second = sorted([p1, p2])
    digest = _sha(first)
    digest.update(second)
    digest.update(text)
    return digest.digest()
def compress(text):
    """ generate a possibly-compressed representation of text """
    # Returns a (header, data) pair: header is "" (data is raw-with-NUL or
    # zlib, told apart by its first byte) or 'u' (explicitly uncompressed).
    if not text:
        return ("", text)
    l = len(text)
    bin = None
    if l < 44:
        # too small for zlib to win; fall through to the raw/'u' path
        pass
    elif l > 1000000:
        # zlib makes an internal copy, thus doubling memory usage for
        # large files, so lets do this in pieces
        z = zlib.compressobj()
        p = []
        pos = 0
        while pos < l:
            pos2 = pos + 2**20
            p.append(z.compress(text[pos:pos2]))
            pos = pos2
        p.append(z.flush())
        if sum(map(len, p)) < l:
            bin = "".join(p)
    else:
        bin = _compress(text)
    if bin is None or len(bin) > l:
        # compression did not help: store raw.  A leading NUL byte already
        # marks the chunk as uncompressed; otherwise prefix with 'u'.
        if text[0] == '\0':
            return ("", text)
        return ('u', text)
    return ("", bin)
def decompress(bin):
    """ decompress the given input """
    # Inverse of compress(): dispatch on the chunk's first byte.
    if not bin:
        return bin
    marker = bin[0]
    if marker == '\0':
        # leading NUL: chunk was stored raw, no marker byte to strip
        return bin
    if marker == 'u':
        # explicit "uncompressed" marker: drop it
        return bin[1:]
    if marker == 'x':
        # zlib stream (zlib headers start with 'x')
        return _decompress(bin)
    raise RevlogError(_("unknown compression type %r") % marker)
class lazyparser(object):
    """
    this class avoids the need to parse the entirety of large indices
    """

    # lazyparser is not safe to use on windows if win32 extensions not
    # available. it keeps file handle open, which make it not possible
    # to break hardlinks on local cloned repos.

    def __init__(self, dataf, size):
        # dataf: open index file handle; size: its length in bytes
        self.dataf = dataf
        self.s = struct.calcsize(indexformatng)  # bytes per packed entry
        self.datasize = size
        self.l = size/self.s            # entry count (Python 2 int division)
        self.index = [None] * self.l    # raw packed entries, faulted in lazily
        self.map = {nullid: nullrev}    # nodeid -> rev, filled as entries load
        self.allmap = 0                 # 1 once every nodeid is in self.map
        self.all = 0                    # 1 once every index entry is loaded
        self.mapfind_count = 0          # number of findnode() calls so far
    def loadmap(self):
        """
        during a commit, we need to make sure the rev being added is
        not a duplicate. This requires loading the entire index,
        which is fairly slow. loadmap can load up just the node map,
        which takes much less time.
        """
        if self.allmap:
            return
        end = self.datasize
        self.allmap = 1
        cur = 0
        count = 0
        blocksize = self.s * 256
        self.dataf.seek(0)
        while cur < end:
            data = self.dataf.read(blocksize)
            off = 0
            for x in xrange(256):
                # the nodeid lives at a fixed offset inside each entry
                n = data[off + ngshaoffset:off + ngshaoffset + 20]
                self.map[n] = count
                count += 1
                if count >= self.l:
                    break
                off += self.s
            cur += blocksize
    def loadblock(self, blockstart, blocksize, data=None):
        # fault in the packed entries covering [blockstart, blockstart+blocksize)
        if self.all:
            return
        if data is None:
            self.dataf.seek(blockstart)
            if blockstart + blocksize > self.datasize:
                # the revlog may have grown since we've started running,
                # but we don't have space in self.index for more entries.
                # limit blocksize so that we don't get too much data.
                blocksize = max(self.datasize - blockstart, 0)
            data = self.dataf.read(blocksize)
        lend = len(data) / self.s
        i = blockstart / self.s
        off = 0
        # lazyindex supports __delitem__
        if lend > len(self.index) - i:
            lend = len(self.index) - i
        for x in xrange(lend):
            if self.index[i + x] == None:
                b = data[off : off + self.s]
                self.index[i + x] = b
                n = b[ngshaoffset:ngshaoffset + 20]
                self.map[n] = i + x
            off += self.s
    def findnode(self, node):
        """search backwards through the index file for a specific node"""
        if self.allmap:
            # map is complete and the node wasn't there
            return None

        # hg log will cause many many searches for the manifest
        # nodes.  After we get called a few times, just load the whole
        # thing.
        if self.mapfind_count > 8:
            self.loadmap()
            if node in self.map:
                return node
            return None
        self.mapfind_count += 1
        last = self.l - 1
        while self.index[last] != None:
            if last == 0:
                self.all = 1
                self.allmap = 1
                return None
            last -= 1
        end = (last + 1) * self.s
        blocksize = self.s * 256
        while end >= 0:
            start = max(end - blocksize, 0)
            self.dataf.seek(start)
            data = self.dataf.read(end - start)
            findend = end - start
            while True:
                # we're searching backwards, so we have to make sure
                # we don't find a changeset where this node is a parent
                off = data.find(node, 0, findend)
                findend = off
                if off >= 0:
                    i = off / self.s
                    off = i * self.s
                    n = data[off + ngshaoffset:off + ngshaoffset + 20]
                    if n == node:
                        self.map[n] = i + start / self.s
                        return node
                else:
                    break
            end -= blocksize
        return None
    def loadindex(self, i=None, end=None):
        # Load index entries: all of them (i is None), the range [i, end),
        # or the 1024-entry block containing entry i.
        if self.all:
            return
        all = False
        if i == None:
            blockstart = 0
            blocksize = (65536 / self.s) * self.s
            end = self.datasize
            all = True
        else:
            if end:
                blockstart = i * self.s
                end = end * self.s
                blocksize = end - blockstart
            else:
                blockstart = (i & ~1023) * self.s
                blocksize = self.s * 1024
                end = blockstart + blocksize
        while blockstart < end:
            self.loadblock(blockstart, blocksize)
            blockstart += blocksize
        if all:
            self.all = True
class lazyindex(object):
    """A list-like facade over lazyparser that unpacks entries on demand."""

    def __init__(self, parser):
        self.p = parser

    def __len__(self):
        return len(self.p.index)

    def load(self, pos):
        # support negative indices, then ask the parser to fault the
        # relevant block in before returning the raw packed entry
        if pos < 0:
            pos = pos + len(self.p.index)
        self.p.loadindex(pos)
        return self.p.index[pos]

    def __getitem__(self, pos):
        raw = self.p.index[pos] or self.load(pos)
        return _unpack(indexformatng, raw)

    def __setitem__(self, pos, item):
        self.p.index[pos] = _pack(indexformatng, *item)

    def __delitem__(self, pos):
        del self.p.index[pos]

    def insert(self, pos, e):
        packed = _pack(indexformatng, *e)
        self.p.index.insert(pos, packed)

    def append(self, e):
        packed = _pack(indexformatng, *e)
        self.p.index.append(packed)
class lazymap(object):
    """a lazy version of the node map"""
    def __init__(self, parser):
        self.p = parser
    def load(self, key):
        # search the on-disk index for the node; findnode caches it in p.map
        n = self.p.findnode(key)
        if n == None:
            raise KeyError(key)
    def __contains__(self, key):
        if key in self.p.map:
            return True
        # not seen yet: load the complete node map before deciding
        self.p.loadmap()
        return key in self.p.map
    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            ret = self.p.index[i]
            if not ret:
                self.p.loadindex(i)
                ret = self.p.index[i]
            if isinstance(ret, str):
                ret = _unpack(indexformatng, ret)
            # entry field 7 is the nodeid
            yield ret[7]
    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))
    def __setitem__(self, key, val):
        self.p.map[key] = val
    def __delitem__(self, key):
        del self.p.map[key]
# v0 entry: offset, length, base, linkrev (4 longs) + p1, p2, nodeid (20s each)
indexformatv0 = ">4l20s20s20s"
v0shaoffset = 56  # byte offset of the nodeid inside a packed v0 entry
class revlogoldio(object):
    """I/O handler for the original (v0) revlog index format."""
    def __init__(self):
        self.size = struct.calcsize(indexformatv0)
    def parseindex(self, fp, inline):
        # Parse a v0 index; returns (index, nodemap, chunkcache).  Entries
        # are converted to the revlogv1 in-memory tuple layout.
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        data = fp.read()
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = _unpack(indexformatv0, cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1
        return index, nodemap, None
    def packentry(self, entry, node, version, rev):
        # node is a rev->nodeid callable used to map the parent revs back
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)
# index ng:
#  6 bytes offset
#  2 bytes flags
#  4 bytes compressed length
#  4 bytes uncompressed length
#  4 bytes: base rev
#  4 bytes link rev
#  4 bytes parent 1 rev
#  4 bytes parent 2 rev
# 32 bytes: nodeid (20-byte sha1 + 12 bytes of zero padding)
indexformatng = ">Qiiiiii20s12x"
ngshaoffset = 32  # byte offset of the nodeid inside a packed ng entry
versionformat = ">I"  # rev 0's first 4 bytes hold the version/flags word
class revlogio(object):
    """I/O handler for the current ("ng") revlog index format."""
    def __init__(self):
        self.size = struct.calcsize(indexformatng)
    def parseindex(self, fp, inline):
        # Parse an ng index; returns (index, nodemap, chunkcache).
        try:
            size = util.fstat(fp).st_size
        except AttributeError:
            size = 0
        if util.openhardlinks() and not inline and size > 1000000:
            # big index, let's parse it on demand
            parser = lazyparser(fp, size)
            index = lazyindex(parser)
            nodemap = lazymap(parser)
            # rev 0 stores the version word where its offset would be;
            # mask the offset to 0 so start(0) is correct
            e = list(index[0])
            type = gettype(e[0])
            e[0] = offset_type(0, type)
            index[0] = e
            return index, nodemap, None
        s = self.size
        cache = None
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        # if we're not using lazymap, always read the whole index
        data = fp.read()
        l = len(data) - s
        append = index.append
        if inline:
            cache = (0, data)
            while off <= l:
                e = _unpack(indexformatng, data[off:off + s])
                nodemap[e[7]] = n
                append(e)
                n += 1
                if e[1] < 0:
                    break
                # inline: the revision data follows each entry; skip it
                off += e[1] + s
        else:
            while off <= l:
                e = _unpack(indexformatng, data[off:off + s])
                nodemap[e[7]] = n
                append(e)
                n += 1
                off += s
        e = list(index[0])
        type = gettype(e[0])
        e[0] = offset_type(0, type)
        index[0] = e
        return index, nodemap, cache
    def packentry(self, entry, node, version, rev):
        p = _pack(indexformatng, *entry)
        if rev == 0:
            # the first entry carries the version word in its offset field
            p = _pack(versionformat, version) + p[4:]
        return p
class revlog(object):
"""
the underlying revision storage object
A revlog consists of two parts, an index and the revision data.
The index is a file with a fixed record size containing
information on each revision, includings its nodeid (hash), the
nodeids of its parents, the position and offset of its data within
the data file, and the revision it's based on. Finally, each entry
contains a linkrev entry that can serve as a pointer to external
data.
The revision data itself is a linear collection of data chunks.
Each chunk represents a revision and is usually represented as a
delta against the previous chunk. To bound lookup time, runs of
deltas are limited to about 2 times the length of the original
version data. This makes retrieval of a version proportional to
its size, or O(1) relative to the number of revisions.
Both pieces of the revlog are written to in an append-only
fashion, which means we never need to rewrite a file to insert or
remove data, and can use some simple techniques to avoid the need
for locking while reading.
"""
def __init__(self, opener, indexfile):
    """
    create a revlog object

    opener is a function that abstracts the file opening operation
    and can be used to implement COW semantics or the like.
    """
    self.indexfile = indexfile
    self.datafile = indexfile[:-2] + ".d"   # "xx.i" -> "xx.d"
    self.opener = opener
    self._cache = None        # (node, rev, text) of last revision built
    self._chunkcache = None   # (offset, data) window over the data file
    self.nodemap = {nullid: nullrev}
    self.index = []

    # pick a default version; the opener may override it
    v = REVLOG_DEFAULT_VERSION
    if hasattr(opener, "defversion"):
        v = opener.defversion
        if v & REVLOGNG:
            v |= REVLOGNGINLINEDATA

    # read the version word from an existing index, if any
    i = ""
    try:
        f = self.opener(self.indexfile)
        i = f.read(4)
        f.seek(0)
        if len(i) > 0:
            v = struct.unpack(versionformat, i)[0]
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise

    self.version = v
    self._inline = v & REVLOGNGINLINEDATA
    flags = v & ~0xFFFF
    fmt = v & 0xFFFF
    if fmt == REVLOGV0 and flags:
        raise RevlogError(_("index %s unknown flags %#04x for format v0")
                          % (self.indexfile, flags >> 16))
    elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
        raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                          % (self.indexfile, flags >> 16))
    elif fmt > REVLOGNG:
        raise RevlogError(_("index %s unknown format %d")
                          % (self.indexfile, fmt))

    self._io = revlogio()
    if self.version == REVLOGV0:
        self._io = revlogoldio()
    if i:
        d = self._io.parseindex(f, self._inline)
        self.index, self.nodemap, self._chunkcache = d

    # add the magic null revision at -1
    self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
def _loadindex(self, start, end):
    """load a block of indexes all at once from the lazy parser"""
    # no-op when the index was parsed eagerly (plain list)
    if isinstance(self.index, lazyindex):
        self.index.p.loadindex(start, end)
def _loadindexmap(self):
    """loads both the map and the index from the lazy parser"""
    if isinstance(self.index, lazyindex):
        p = self.index.p
        p.loadindex()
        self.nodemap = p.map
def _loadmap(self):
    """loads the map from the lazy parser"""
    if isinstance(self.nodemap, lazymap):
        self.nodemap.p.loadmap()
        self.nodemap = self.nodemap.p.map
def tip(self):
    # last real revision; the index carries a trailing null-entry sentinel
    return self.node(len(self.index) - 2)
def count(self):
    # number of real revisions (excludes the null-entry sentinel)
    return len(self.index) - 1
def rev(self, node):
    try:
        return self.nodemap[node]
    except KeyError:
        raise LookupError(node, self.indexfile, _('no node'))
def node(self, rev):
    # index entry field 7 is the nodeid
    return self.index[rev][7]
def linkrev(self, node):
    # field 4 is the link revision into the owning changelog
    return self.index[self.rev(node)][4]
def parents(self, node):
    # fields 5 and 6 are the parent revs; translate back to nodeids
    d = self.index[self.rev(node)][5:7]
    return (self.node(d[0]), self.node(d[1]))
def parentrevs(self, rev):
    return self.index[rev][5:7]
def start(self, rev):
    # the offset lives in the upper bits of field 0 (lower 16 are flags)
    return int(self.index[rev][0] >> 16)
def end(self, rev):
    return self.start(rev) + self.length(rev)
def length(self, rev):
    # field 1: compressed chunk length
    return self.index[rev][1]
def base(self, rev):
    # field 3: base revision of this rev's delta chain
    return self.index[rev][3]
def size(self, rev):
    """return the length of the uncompressed text for a given revision"""
    l = self.index[rev][2]
    if l >= 0:
        return l

    # stored length is unknown (-1): rebuild the text and measure it
    t = self.revision(self.node(rev))
    return len(t)

# alternate implementation, The advantage to this code is it
# will be faster for a single revision.  But, the results are not
# cached, so finding the size of every revision will be slower.
"""
if self.cache and self.cache[1] == rev:
return len(self.cache[2])
base = self.base(rev)
if self.cache and self.cache[1] >= base and self.cache[1] < rev:
base = self.cache[1]
text = self.cache[2]
else:
text = self.revision(self.node(base))
l = len(text)
for x in xrange(base + 1, rev + 1):
l = mdiff.patchedsize(l, self.chunk(x))
return l
"""
def reachable(self, node, stop=None):
    """return a hash of all nodes ancestral to a given node, including
    the node itself, stopping when stop is matched"""
    # breadth-first walk toward the roots; the dict is used as a set
    reachable = {}
    visit = [node]
    reachable[node] = 1
    if stop:
        stopn = self.rev(stop)
    else:
        stopn = 0
    while visit:
        n = visit.pop(0)
        if n == stop:
            continue
        if n == nullid:
            continue
        for p in self.parents(n):
            if self.rev(p) < stopn:
                # parent is older than the stop point: prune this branch
                continue
            if p not in reachable:
                reachable[p] = 1
                visit.append(p)
    return reachable
def nodesbetween(self, roots=None, heads=None):
    """Return a tuple containing three elements. Elements 1 and 2 contain
    a final list bases and heads after all the unreachable ones have been
    pruned.  Element 0 contains a topologically sorted list of all
    nodes that satisfy these constraints:

    1. All nodes must be descended from a node in roots (the nodes on
       roots are considered descended from themselves).
    2. All nodes must also be ancestors of a node in heads (the nodes in
       heads are considered to be their own ancestors).

    If roots is unspecified, nullid is assumed as the only root.
    If heads is unspecified, it is taken to be the output of the
    heads method (i.e. a list of all nodes in the repository that
    have no children)."""
    nonodes = ([], [], [])
    if roots is not None:
        roots = list(roots)
        if not roots:
            return nonodes
        lowestrev = min([self.rev(n) for n in roots])
    else:
        roots = [nullid] # Everybody's a descendent of nullid
        lowestrev = nullrev
    if (lowestrev == nullrev) and (heads is None):
        # We want _all_ the nodes!
        return ([self.node(r) for r in xrange(0, self.count())],
                [nullid], list(self.heads()))
    if heads is None:
        # All nodes are ancestors, so the latest ancestor is the last
        # node.
        highestrev = self.count() - 1
        # Set ancestors to None to signal that every node is an ancestor.
        ancestors = None
        # Set heads to an empty dictionary for later discovery of heads
        heads = {}
    else:
        heads = list(heads)
        if not heads:
            return nonodes
        ancestors = {}
        # Turn heads into a dictionary so we can remove 'fake' heads.
        # Also, later we will be using it to filter out the heads we can't
        # find from roots.
        heads = dict.fromkeys(heads, 0)
        # Start at the top and keep marking parents until we're done.
        nodestotag = heads.keys()
        # Remember where the top was so we can use it as a limit later.
        highestrev = max([self.rev(n) for n in nodestotag])
        while nodestotag:
            # grab a node to tag
            n = nodestotag.pop()
            # Never tag nullid
            if n == nullid:
                continue
            # A node's revision number represents its place in a
            # topologically sorted list of nodes.
            r = self.rev(n)
            if r >= lowestrev:
                if n not in ancestors:
                    # If we are possibly a descendent of one of the roots
                    # and we haven't already been marked as an ancestor
                    ancestors[n] = 1 # Mark as ancestor
                    # Add non-nullid parents to list of nodes to tag.
                    nodestotag.extend([p for p in self.parents(n) if
                                       p != nullid])
            elif n in heads: # We've seen it before, is it a fake head?
                # So it is, real heads should not be the ancestors of
                # any other heads.
                heads.pop(n)
        if not ancestors:
            return nonodes
        # Now that we have our set of ancestors, we want to remove any
        # roots that are not ancestors.

        # If one of the roots was nullid, everything is included anyway.
        if lowestrev > nullrev:
            # But, since we weren't, let's recompute the lowest rev to not
            # include roots that aren't ancestors.

            # Filter out roots that aren't ancestors of heads
            roots = [n for n in roots if n in ancestors]
            # Recompute the lowest revision
            if roots:
                lowestrev = min([self.rev(n) for n in roots])
            else:
                # No more roots?  Return empty list
                return nonodes
        else:
            # We are descending from nullid, and don't need to care about
            # any other roots.
            lowestrev = nullrev
            roots = [nullid]
    # Transform our roots list into a 'set' (i.e. a dictionary where the
    # values don't matter.
    descendents = dict.fromkeys(roots, 1)
    # Also, keep the original roots so we can filter out roots that aren't
    # 'real' roots (i.e. are descended from other roots).
    roots = descendents.copy()
    # Our topologically sorted list of output nodes.
    orderedout = []
    # Don't start at nullid since we don't want nullid in our output list,
    # and if nullid shows up in descedents, empty parents will look like
    # they're descendents.
    for r in xrange(max(lowestrev, 0), highestrev + 1):
        n = self.node(r)
        isdescendent = False
        if lowestrev == nullrev:  # Everybody is a descendent of nullid
            isdescendent = True
        elif n in descendents:
            # n is already a descendent
            isdescendent = True
            # This check only needs to be done here because all the roots
            # will start being marked is descendents before the loop.
            if n in roots:
                # If n was a root, check if it's a 'real' root.
                p = tuple(self.parents(n))
                # If any of its parents are descendents, it's not a root.
                if (p[0] in descendents) or (p[1] in descendents):
                    roots.pop(n)
        else:
            p = tuple(self.parents(n))
            # A node is a descendent if either of its parents are
            # descendents.  (We seeded the dependents list with the roots
            # up there, remember?)
            if (p[0] in descendents) or (p[1] in descendents):
                descendents[n] = 1
                isdescendent = True
        if isdescendent and ((ancestors is None) or (n in ancestors)):
            # Only include nodes that are both descendents and ancestors.
            orderedout.append(n)
            if (ancestors is not None) and (n in heads):
                # We're trying to figure out which heads are reachable
                # from roots.
                # Mark this head as having been reached
                heads[n] = 1
            elif ancestors is None:
                # Otherwise, we're trying to discover the heads.
                # Assume this is a head because if it isn't, the next step
                # will eventually remove it.
                heads[n] = 1
                # But, obviously its parents aren't.
                for p in self.parents(n):
                    heads.pop(p, None)
    heads = [n for n in heads.iterkeys() if heads[n] != 0]
    roots = roots.keys()
    assert orderedout
    assert roots
    assert heads
    return (orderedout, roots, heads)
def heads(self, start=None, stop=None):
    """return the list of all nodes that have no children

    if start is specified, only heads that are descendants of
    start will be returned
    if stop is specified, it will consider all the revs from stop
    as if they had no children
    """
    if start is None and stop is None:
        # fast path: a rev is a head iff no later rev lists it as a parent
        count = self.count()
        if not count:
            return [nullid]
        ishead = [1] * (count + 1)
        index = self.index
        for r in xrange(count):
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0
        return [self.node(r) for r in xrange(count) if ishead[r]]

    if start is None:
        start = nullid
    if stop is None:
        stop = []
    stoprevs = dict.fromkeys([self.rev(n) for n in stop])
    startrev = self.rev(start)
    reachable = {startrev: 1}
    heads = {startrev: 1}

    parentrevs = self.parentrevs
    for r in xrange(startrev + 1, self.count()):
        for p in parentrevs(r):
            if p in reachable:
                if r not in stoprevs:
                    reachable[r] = 1
                heads[r] = 1
            if p in heads and p not in stoprevs:
                # p has a reachable child, so it is no longer a head
                del heads[p]

    return [self.node(r) for r in heads]
def children(self, node):
    """find the children of a given node"""
    c = []
    p = self.rev(node)
    # children always have higher revs than their parents, so scan forward
    for r in range(p + 1, self.count()):
        prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
        if prevs:
            for pr in prevs:
                if pr == p:
                    c.append(self.node(r))
        elif p == nullrev:
            # rev r has no parents: it is a child of the null revision
            c.append(self.node(r))
    return c
def _match(self, id):
    # Exact-match lookup: rev number, binary nodeid, str(rev), or full
    # hex nodeid.  Returns the node or falls through to None.
    if isinstance(id, (long, int)):
        # rev
        return self.node(id)
    if len(id) == 20:
        # possibly a binary node
        # odds of a binary node being all hex in ASCII are 1 in 10**25
        try:
            node = id
            r = self.rev(node) # quick search the index
            return node
        except LookupError:
            pass # may be partial hex id
    try:
        # str(rev)
        rev = int(id)
        if str(rev) != id:
            raise ValueError
        if rev < 0:
            rev = self.count() + rev
        if rev < 0 or rev >= self.count():
            raise ValueError
        return self.node(rev)
    except (ValueError, OverflowError):
        pass
    if len(id) == 40:
        try:
            # a full hex nodeid?
            node = bin(id)
            r = self.rev(node)
            return node
        except TypeError:
            pass
def _partialmatch(self, id):
    # Unique hex-prefix lookup; raises LookupError when the prefix is
    # ambiguous, returns None when nothing matches.
    if len(id) < 40:
        try:
            # hex(node)[:...]
            bin_id = bin(id[:len(id) & ~1]) # grab an even number of digits
            node = None
            for n in self.nodemap:
                if n.startswith(bin_id) and hex(n).startswith(id):
                    if node is not None:
                        raise LookupError(id, self.indexfile,
                                          _('ambiguous identifier'))
                    node = n
            if node is not None:
                return node
        except TypeError:
            pass
def lookup(self, id):
    """locate a node based on:
        - revision number or str(revision number)
        - nodeid or subset of hex nodeid
    """
    n = self._match(id)
    if n is not None:
        return n
    n = self._partialmatch(id)
    if n:
        return n
    raise LookupError(id, self.indexfile, _('no match found'))
def cmp(self, node, text):
    """compare text with a given file revision"""
    # True when text does NOT match the stored revision (hash mismatch)
    p1, p2 = self.parents(node)
    return hash(text, p1, p2) != node
def chunk(self, rev, df=None):
    # Read and decompress the raw stored chunk for rev, refreshing the
    # single-window chunk cache when needed.  df is an optional
    # already-open data/index file handle to reuse.
    def loadcache(df):
        # refill self._chunkcache with a window starting at 'start'
        if not df:
            if self._inline:
                df = self.opener(self.indexfile)
            else:
                df = self.opener(self.datafile)
        df.seek(start)
        self._chunkcache = (start, df.read(cache_length))

    start, length = self.start(rev), self.length(rev)
    if self._inline:
        # skip the rev+1 index entries interleaved before this rev's data
        start += (rev + 1) * self._io.size
    end = start + length

    offset = 0
    if not self._chunkcache:
        cache_length = max(65536, length)
        loadcache(df)
    else:
        cache_start = self._chunkcache[0]
        cache_length = len(self._chunkcache[1])
        cache_end = cache_start + cache_length
        if start >= cache_start and end <= cache_end:
            # it is cached
            offset = start - cache_start
        else:
            cache_length = max(65536, length)
            loadcache(df)

    # avoid copying large chunks
    c = self._chunkcache[1]
    if cache_length != length:
        c = c[offset:offset + length]

    return decompress(c)
def delta(self, node):
    """return or calculate a delta between a node and its predecessor"""
    r = self.rev(node)
    return self.revdiff(r - 1, r)
def revdiff(self, rev1, rev2):
    """return or calculate a delta between two revisions"""
    if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
        # adjacent revs on the same delta chain: the stored chunk IS the delta
        return self.chunk(rev2)
    return mdiff.textdiff(self.revision(self.node(rev1)),
                          self.revision(self.node(rev2)))
def revision(self, node):
    """return an uncompressed revision of a given node"""
    if node == nullid:
        return ""
    if self._cache and self._cache[0] == node:
        return str(self._cache[2])

    # look up what we need to read
    text = None
    rev = self.rev(node)
    base = self.base(rev)

    # check rev flags
    if self.index[rev][0] & 0xFFFF:
        raise RevlogError(_('incompatible revision flag %x') %
                          (self.index[rev][0] & 0xFFFF))

    df = None

    # do we have useful data cached?
    if self._cache and self._cache[1] >= base and self._cache[1] < rev:
        # start patching from the cached revision instead of the chain base
        base = self._cache[1]
        text = str(self._cache[2])
        self._loadindex(base, rev + 1)
        if not self._inline and rev > base + 1:
            df = self.opener(self.datafile)
    else:
        self._loadindex(base, rev + 1)
        if not self._inline and rev > base:
            df = self.opener(self.datafile)
        text = self.chunk(base, df=df)

    # apply the delta chain on top of the base text
    bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
    text = mdiff.patches(text, bins)
    p1, p2 = self.parents(node)
    if node != hash(text, p1, p2):
        raise RevlogError(_("integrity check failed on %s:%d")
                          % (self.datafile, rev))

    self._cache = (node, rev, text)
    return text
def checkinlinesize(self, tr, fp=None):
    # Convert an inline revlog to split index+data files once it grows
    # past 128k; both rewrites are registered with transaction tr so the
    # change can be rolled back.
    if not self._inline:
        return
    if not fp:
        fp = self.opener(self.indexfile, 'r')
        fp.seek(0, 2)
    size = fp.tell()
    if size < 131072:
        return
    trinfo = tr.find(self.indexfile)
    if trinfo == None:
        raise RevlogError(_("%s not found in the transaction")
                          % self.indexfile)
    trindex = trinfo[2]
    dataoff = self.start(trindex)

    tr.add(self.datafile, dataoff)
    df = self.opener(self.datafile, 'w')
    try:
        calc = self._io.size
        for r in xrange(self.count()):
            # rev r's data sits after r+1 index entries in the inline file
            start = self.start(r) + (r + 1) * calc
            length = self.length(r)
            fp.seek(start)
            d = fp.read(length)
            df.write(d)
    finally:
        df.close()

    fp.close()
    fp = self.opener(self.indexfile, 'w', atomictemp=True)
    self.version &= ~(REVLOGNGINLINEDATA)
    self._inline = False
    for i in xrange(self.count()):
        e = self._io.packentry(self.index[i], self.node, self.version, i)
        fp.write(e)

    # if we don't call rename, the temp file will never replace the
    # real index
    fp.rename()

    tr.replace(self.indexfile, trindex * calc)
    self._chunkcache = None
def addrevision(self, text, transaction, link, p1, p2, d=None):
    """add a revision to the log

    text - the revision data to add
    transaction - the transaction object used for rollback
    link - the linkrev data to add
    p1, p2 - the parent nodeids of the revision
    d - an optional precomputed delta
    """
    dfh = None
    if not self._inline:
        dfh = self.opener(self.datafile, "a")
    ifh = self.opener(self.indexfile, "a+")
    try:
        return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
    finally:
        # always release the file handles, even if the write failed
        if dfh:
            dfh.close()
        ifh.close()
def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
    node = hash(text, p1, p2)
    if node in self.nodemap:
        # revision already stored: nothing to do
        return node

    curr = self.count()
    prev = curr - 1
    base = self.base(prev)
    offset = self.end(prev)

    if curr:
        if not d:
            ptext = self.revision(self.node(prev))
            d = mdiff.textdiff(ptext, text)
        data = compress(d)
        l = len(data[1]) + len(data[0])
        dist = l + offset - self.start(base)

    # full versions are inserted when the needed deltas
    # become comparable to the uncompressed text
    if not curr or dist > len(text) * 2:
        data = compress(text)
        l = len(data[1]) + len(data[0])
        base = curr

    e = (offset_type(offset, 0), l, len(text),
         base, link, self.rev(p1), self.rev(p2), node)

    # insert before the trailing null-entry sentinel
    self.index.insert(-1, e)
    self.nodemap[node] = curr

    entry = self._io.packentry(e, self.node, self.version, curr)
    if not self._inline:
        transaction.add(self.datafile, offset)
        transaction.add(self.indexfile, curr * len(entry))
        if data[0]:
            dfh.write(data[0])
        dfh.write(data[1])
        dfh.flush()
        ifh.write(entry)
    else:
        offset += curr * self._io.size
        transaction.add(self.indexfile, offset, curr)
        ifh.write(entry)
        ifh.write(data[0])
        ifh.write(data[1])
        # inline file may now be big enough to split out
        self.checkinlinesize(transaction, ifh)

    self._cache = (node, curr, text)
    return node
def ancestor(self, a, b):
"""calculate the least common ancestor of nodes a and b"""
def parents(rev):
return [p for p in self.parentrevs(rev) if p != nullrev]
c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
if c is None:
return nullid
return self.node(c)
    def group(self, nodelist, lookup, infocollect=None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. the first delta is
        parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
        have this parent as it has all history before these
        changesets. parent is parent[0]
        """
        revs = [self.rev(n) for n in nodelist]

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield changegroup.closechunk()
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # build deltas
        for d in xrange(0, len(revs) - 1):
            # NOTE: the loop variable d is rebound below to hold the delta
            # payload for this pair.
            a, b = revs[d], revs[d + 1]
            nb = self.node(b)

            if infocollect is not None:
                infocollect(nb)

            p = self.parents(nb)
            # Chunk metadata: node, both parents, and the caller-supplied
            # lookup payload (each a fixed-size node string).
            meta = nb + p[0] + p[1] + lookup(nb)
            if a == -1:
                # No delta base available: ship the full text prefixed with
                # a trivial diff header.
                d = self.revision(nb)
                meta += mdiff.trivialdiffheader(len(d))
            else:
                d = self.revdiff(a, b)
            yield changegroup.chunkheader(len(meta) + len(d))
            yield meta
            if len(d) > 2**20:
                # Emit deltas larger than 1 MiB in 256 KiB (2**18 byte)
                # slices to bound per-yield memory use.
                pos = 0
                while pos < len(d):
                    pos2 = pos + 2 ** 18
                    yield d[pos:pos2]
                    pos = pos2
            else:
                yield d

        yield changegroup.closechunk()
    def addgroup(self, revs, linkmapper, transaction, unique=0):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        #track the base of the current delta log
        r = self.count()
        t = r - 1
        node = None

        base = prev = nullrev
        start = end = textlen = 0
        if r:
            end = self.end(t)

        ifh = self.opener(self.indexfile, "a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        try:
            # loop through our set of deltas
            chain = None
            for chunk in revs:
                # Each chunk starts with four 20-byte node ids: the new
                # node, both parents, and the changeset used for linkrev.
                node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
                link = linkmapper(cs)
                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    # if unique:
                    #    raise RevlogError(_("already have %s") % hex(node[:4]))
                    chain = node
                    continue
                # Zero-copy view of the delta payload following the header.
                delta = buffer(chunk, 80)
                del chunk

                for p in (p1, p2):
                    if not p in self.nodemap:
                        raise LookupError(p, self.indexfile,
                                          _('unknown parent'))

                if not chain:
                    # retrieve the parent revision of the delta chain
                    chain = p1
                    if not chain in self.nodemap:
                        raise LookupError(chain, self.indexfile,
                                          _('unknown base'))

                # full versions are inserted when the needed deltas become
                # comparable to the uncompressed text or when the previous
                # version is not the one we have a delta against. We use
                # the size of the previous full rev as a proxy for the
                # current size.

                if chain == prev:
                    cdelta = compress(delta)
                    cdeltalen = len(cdelta[0]) + len(cdelta[1])
                    textlen = mdiff.patchedsize(textlen, delta)

                if chain != prev or (end - start + cdeltalen) > textlen * 2:
                    # flush our writes here so we can read it in revision
                    if dfh:
                        dfh.flush()
                    ifh.flush()
                    text = self.revision(chain)
                    if len(text) == 0:
                        # skip over trivial delta header
                        text = buffer(delta, 12)
                    else:
                        text = mdiff.patches(text, [delta])
                    del delta
                    # Store as a full (or freshly re-based) revision.
                    chk = self._addrevision(text, transaction, link, p1, p2,
                                            None, ifh, dfh)
                    if not dfh and not self._inline:
                        # addrevision switched from inline to conventional
                        # reopen the index
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                    if chk != node:
                        raise RevlogError(_("consistency error adding group"))
                    textlen = len(text)
                else:
                    # Append the compressed delta directly; cdelta/cdeltalen
                    # were computed above in the chain == prev branch.
                    e = (offset_type(end, 0), cdeltalen, textlen, base,
                         link, self.rev(p1), self.rev(p2), node)
                    self.index.insert(-1, e)
                    self.nodemap[node] = r
                    entry = self._io.packentry(e, self.node, self.version, r)
                    if self._inline:
                        ifh.write(entry)
                        ifh.write(cdelta[0])
                        ifh.write(cdelta[1])
                        self.checkinlinesize(transaction, ifh)
                        if not self._inline:
                            # checkinlinesize() migrated the revlog; reopen
                            # handles in the new layout.
                            dfh = self.opener(self.datafile, "a")
                            ifh = self.opener(self.indexfile, "a")
                    else:
                        dfh.write(cdelta[0])
                        dfh.write(cdelta[1])
                        ifh.write(entry)

                # Advance bookkeeping for the next chunk in the stream.
                t, r, chain, prev = r, r + 1, node, node
                base = self.base(t)
                start = self.start(base)
                end = self.end(t)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return node
    def strip(self, minlink):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll readd them after this truncation.
        """
        if self.count() == 0:
            return

        if isinstance(self.index, lazyindex):
            self._loadindexmap()

        # Find the first revision to remove; field 4 of an index entry is
        # the linkrev (see the tuple layout built in _addrevision).  The
        # for-else falls through when nothing qualifies.
        for rev in xrange(0, self.count()):
            if self.index[rev][4] >= minlink:
                break
        else:
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            df = self.opener(self.datafile, "a")
            df.truncate(end)
            end = rev * self._io.size
        else:
            # Inline layout interleaves data with index entries, so the
            # truncation point includes both.
            end += rev * self._io.size

        indexf = self.opener(self.indexfile, "a")
        indexf.truncate(end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chunkcache = None
        for x in xrange(rev, self.count()):
            del self.nodemap[self.node(x)]

        # Slice to -1 so the trailing sentinel index entry survives.
        del self.index[rev:-1]
    def checksize(self):
        # Sanity-check on-disk sizes against what the index implies.
        # Returns (dd, di): the number of unexpected excess bytes in the
        # data file and in the index file respectively (0 when consistent,
        # and 0 when a file simply does not exist).
        expected = 0
        if self.count():
            expected = max(0, self.end(self.count() - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
        except IOError, inst:
            # A missing data file is fine (e.g. inline revlogs).
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            s = self._io.size
            # Python 2 integer division: whole entries that fit on disk.
            i = max(0, actual / s)
            di = actual - (i * s)
            if self._inline:
                # Inline layout: account for the data bytes stored between
                # index entries before computing the leftover.
                databytes = 0
                for r in xrange(self.count()):
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - self.count() * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)
| carlgao/lenga | images/lenny64-peon/usr/share/python-support/mercurial-common/mercurial/revlog.py | Python | mit | 46,218 | [
"VisIt"
] | 21bd256dbbf87c9cfccbe529835f8e0532551f64aa19540cf1d2cc62dace20d0 |
import argparse
import logging
import multiprocessing
import time
from functools import partial, update_wrapper
from defaults import EXTRACTION_MAX_READ_PAIRS, EXTRACTION_MAX_NM, EXTRACTION_MAX_INTERVAL_TRUNCATION, EXTRACTION_TRUNCATION_PAD
import pysam
# Byte -> complement lookup table: identity for every character except the
# four DNA bases, which map to their Watson-Crick complements.
# (range() instead of the Python2-only xrange(): identical result here and
# keeps this helper importable under Python 3.)
compl_table = [chr(i) for i in range(256)]
compl_table[ord('A')] = 'T'
compl_table[ord('C')] = 'G'
compl_table[ord('G')] = 'C'
compl_table[ord('T')] = 'A'


def compl(seq):
    """Return the base-complement of DNA sequence string *seq*.

    Non-ACGT characters (including lower case) pass through unchanged.
    """
    return "".join([compl_table[ord(i)] for i in seq])
def get_sequence_quality(aln):
    """Return (sequence, quality) for *aln* in original read orientation.

    Reverse-strand alignments are reverse-complemented and their quality
    string reversed, so the output matches what the sequencer produced.
    """
    seq = aln.seq.upper()
    if aln.is_reverse:
        return compl(seq)[::-1], aln.qual[::-1]
    return seq, aln.qual


def write_read(fd, aln):
    """Append alignment *aln* to file object *fd* as a 4-line FASTQ record."""
    sequence, quality = get_sequence_quality(aln)
    mate_number = 1 if aln.is_read1 else 2
    fd.write("@%s/%d\n%s\n+\n%s\n" % (aln.qname, mate_number,
                                      sequence, quality))
def is_hq(aln, chr_tid, chr_start, chr_end):
    """Treat a read as "high quality" when it is unmapped, has positive
    mapping quality, or maps outside [chr_start, chr_end] on chr_tid."""
    if aln.is_unmapped:
        return True
    if aln.mapq > 0:
        return True
    inside_region = aln.tid == chr_tid and chr_start <= aln.pos <= chr_end
    return not inside_region


def all_pair(aln, mate, chr_tid, chr_start, chr_end):
    """Selector that keeps every read pair unconditionally."""
    return True


def all_pair_hq(aln, mate, chr_tid, chr_start, chr_end):
    """Keep the pair only when both mates pass is_hq()."""
    return (is_hq(aln, chr_tid, chr_start, chr_end) and
            is_hq(mate, chr_tid, chr_start, chr_end))
def get_nm(aln):
    """Return the NM-tag (edit distance) of *aln*, or 0 when unset/zero."""
    nm = aln.opt("NM")
    return int(nm) if nm else 0


def perfect_aln(aln):
    """A "perfect" alignment: mapped, properly paired, a single CIGAR
    operation, and an edit distance within EXTRACTION_MAX_NM."""
    if aln.is_unmapped or not aln.is_proper_pair:
        return False
    return len(aln.cigar) == 1 and get_nm(aln) <= EXTRACTION_MAX_NM


def non_perfect(aln, mate, chr_tid, chr_start, chr_end):
    """Keep pairs where at least one mate is not perfectly aligned."""
    return not (perfect_aln(aln) and perfect_aln(mate))


def non_perfect_hq(aln, mate, chr_tid, chr_start, chr_end):
    """non_perfect() restricted to pairs where both mates pass is_hq()."""
    if perfect_aln(aln) and perfect_aln(mate):
        return False
    return (is_hq(aln, chr_tid, chr_start, chr_end) and
            is_hq(mate, chr_tid, chr_start, chr_end))
def discordant(aln, mate, chr_tid, chr_start, chr_end, isize_min=300, isize_max=400):
    """Keep pairs whose template length is missing (0) or whose absolute
    insert size falls outside [isize_min, isize_max]."""
    if aln.tlen == 0:
        return True
    tlen = abs(aln.tlen)
    return tlen < isize_min or tlen > isize_max


def discordant_with_normal_orientation(aln, mate, chr_tid, chr_start, chr_end, isize_min=300, isize_max=400):
    """Like discordant(), but only forward/reverse-oriented pairs qualify;
    same-orientation pairs are never selected (except tlen == 0)."""
    if aln.tlen == 0:
        return True
    if aln.is_reverse == mate.is_reverse:
        # Both forward or both reverse: not a normally oriented pair.
        return False
    tlen = abs(aln.tlen)
    return tlen < isize_min or tlen > isize_max
def get_mate(aln, bam_handles):
    """Locate the mate of *aln* by querying each BAM handle in turn.

    Returns the first mate found, or None when no handle can resolve it
    (handles raise ValueError for missing/unpaired mates).
    """
    for bam_handle in bam_handles:
        try:
            found = bam_handle.mate(aln)
        except ValueError:
            continue
        if found is not None:
            return found
    return None
def extract_read_pairs(bam_handles, region, prefix, extract_fns, pad=0, max_read_pairs = EXTRACTION_MAX_READ_PAIRS,
                       truncation_pad_read_extract = EXTRACTION_TRUNCATION_PAD,
                       max_interval_len_truncation = EXTRACTION_MAX_INTERVAL_TRUNCATION, sv_type=''):
    """Extract read pairs overlapping *region* (padded by *pad*) from the
    given pysam handles and write them as paired FASTQ files, one pair of
    files per extraction function.

    Returns a list pairing (fastq1_name, fastq2_name) with the number of
    selected pairs, one element per extraction function.
    """
    logger = logging.getLogger("%s-%s" % (extract_read_pairs.__name__, multiprocessing.current_process()))
    extract_fn_names = [extract_fn.__name__ for extract_fn in extract_fns]
    logger.info("Extracting reads for region %s with padding %d using functions %s" % (
        region, pad, extract_fn_names))

    # region is a samtools-style "chrom:start-end" string.
    chr_name = str(region.split(':')[0])
    chr_start = int(region.split(':')[1].split("-")[0]) - pad
    chr_end = int(region.split(':')[1].split('-')[1]) + pad

    selected_pair_counts = [0] * len(extract_fn_names)

    start_time = time.time()
    if chr_start < 0:
        regions_to_extract = []
        logger.error("Skipping read extraction since interval too close to chromosome beginning")
    else:
        # Read alignments from the interval in memory and build a dictionary
        # to get mate instead of calling bammate.mate() function
        regions_to_extract = [(chr_name, chr_start, chr_end)]
        if abs(chr_end-chr_start)>max_interval_len_truncation and sv_type in ["INV","DEL","DUP"]:
            # For large SVs, middle sequences have no effect on genotyping;
            # only extract reads around the breakpoints to speed things up.
            truncate_start = chr_start + pad + truncation_pad_read_extract
            truncate_end = chr_end - (pad + truncation_pad_read_extract)
            logger.info("Truncate the reads in [%d-%d] for %s_%d_%d" % (truncate_start,truncate_end,chr_name,chr_start,chr_end))
            regions_to_extract = [(chr_name, chr_start, truncate_start-1), (chr_name, truncate_end+1, chr_end)]

    # Collect primary alignments from every handle over every sub-region.
    aln_list = [aln for (chr_, start_, end_) in regions_to_extract for bam_handle in bam_handles for aln in bam_handle.fetch(chr_, start=start_, end=end_) if not aln.is_secondary]

    # qname -> [read1, read2]; a slot stays None when that mate fell
    # outside the fetched window.
    aln_dict = {}
    for aln in aln_list:
        if aln.qname not in aln_dict:
            aln_dict[aln.qname] = [None, None]
        aln_dict[aln.qname][0 if aln.is_read1 else 1] = aln

    aln_pairs = []
    if len(aln_dict) <= max_read_pairs:
        logger.info("Building mate dictionary from %d reads" % len(aln_list))
        for aln_pair in aln_dict.values():
            # 0 or 1: that slot is missing; 2: both mates already present.
            missing_index = 0 if aln_pair[0] is None else (1 if aln_pair[1] is None else 2)
            if missing_index < 2:
                # Fall back to the (slower) pysam mate() lookup; pairs whose
                # mate cannot be resolved are dropped entirely.
                mate = get_mate(aln_pair[1 - missing_index], bam_handles)
                if mate is not None:
                    aln_pair[missing_index] = mate
                    aln_pairs.append(aln_pair)
            else:
                aln_pairs.append(aln_pair)
    else:
        logger.info("Too many reads encountered for %s. Skipping read extraction. (%d >%d)"%(region, len(aln_dict),max_read_pairs))

    # One (read1, read2) FASTQ file pair per extraction function.
    ends = [(open("%s_%s_1.fq" % (prefix, name), "w"), open("%s_%s_2.fq" % (prefix, name), "w")) for name in
            extract_fn_names]

    chr_tid = bam_handles[0].gettid(chr_name) if bam_handles else -1
    for first, second in aln_pairs:
        for fn_index, extract_fn in enumerate(extract_fns):
            if extract_fn(first, second,chr_tid,chr_start,chr_end):
                write_read(ends[fn_index][0], first)
                write_read(ends[fn_index][1], second)
                selected_pair_counts[fn_index] += 1

    for end1, end2 in ends:
        end1.close()
        end2.close()

    logger.info("Examined %d pairs in %g seconds" % (len(aln_pairs), time.time() - start_time))
    logger.info("Extraction counts %s" % (zip(extract_fn_names, selected_pair_counts)))

    return zip([(end[0].name, end[1].name) for end in ends], selected_pair_counts)
if __name__ == "__main__":
    # Command-line front end around extract_read_pairs().
    FORMAT = '%(levelname)s %(asctime)-15s %(name)-20s %(message)s'
    logging.basicConfig(level=logging.INFO, format=FORMAT)

    parser = argparse.ArgumentParser(description="Extract reads and mates from a region for spades assembly",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--bams", nargs='+', help="BAM files to extract reads from", required=True, default=[])
    parser.add_argument("--region", help="Samtools region string", required=True)
    parser.add_argument("--prefix", help="Output FASTQ prefix", required=True)
    parser.add_argument("--extract_fn", help="Extraction function", choices=["all_pair", "non_perfect", "discordant"],
                        default="all_pair")
    parser.add_argument("--pad", help="Padding to apply on both sides of the interval", type=int, default=0)
    parser.add_argument("--isize_min", help="Minimum insert size", default=200, type=int)
    parser.add_argument("--isize_max", help="Maximum insert size", default=500, type=int)
    parser.add_argument("--max_read_pairs", help="Maximum read pairs to extract for an interval",
                        default=EXTRACTION_MAX_READ_PAIRS, type=int)

    args = parser.parse_args()

    # Map the CLI choice onto the matching selector function.
    if args.extract_fn == 'all_pair':
        extract_fn = all_pair
    elif args.extract_fn == 'non_perfect':
        extract_fn = non_perfect
    else:
        # Bind the insert-size window; update_wrapper preserves the original
        # function's __name__, which extract_read_pairs uses for file names.
        extract_fn = partial(discordant, isize_min=args.isize_min, isize_max=args.isize_max)
        update_wrapper(extract_fn, discordant)

    bam_handles = [pysam.Samfile(bam, "rb") for bam in args.bams]
    extract_read_pairs(bam_handles, args.region, args.prefix, [extract_fn], pad=args.pad,
                       max_read_pairs=args.max_read_pairs)

    for bam_handle in bam_handles:
        bam_handle.close()
| msahraeian/metasv | metasv/extract_pairs.py | Python | bsd-2-clause | 8,155 | [
"pysam"
] | f944f4198024613b040a80f137e2055fd190ebd0c5e5c2c69a01c717f40114ba |
import numpy as np
import time
import matplotlib.pyplot as plt
import os, pdb, sys
import scipy.optimize
import scipy.interpolate
import scipy.ndimage
import glob
import atpy
# TODO - wrap these routines up as attributes of spectroscopy
# opjects somehow.
def calc_spectra_variations( spectra, ref_spectrum, max_wavshift=5, dwav=0.01, smoothing_fwhm=None, disp_bound_ixs=[] ):
    """
    GIVEN SPECTRA EXTRACTED FROM EACH FRAME AND A REFERENCE
    SPECTRUM, WILL HORIZONTALLY SHIFT AND VERTICALLY STRETCH
    EACH OF THE FORMER TO GIVE THE CLOSEST MATCH TO THE
    REFERENCE SPECTRUM, AND RETURNS THE SHIFTS AND STRETCHES.
    NOTE THAT DWAV IS IN UNITS OF PIXELS, AND THE RETURNED
    WAVSHIFTS VARIABLE IS ALSO IN UNITS OF PIXELS.

    Returns:
      dspec - nframes x ndisp array containing the rescaled shifted spectrum minus the
          reference spectrum.
      wavshifts - The amounts by which the reference spectrum had to be shifted along the shift
          in pixels along the dispersion axis in pixels to match the individual spectra.
      vstretches - The amounts by which the reference spectrum have to be vertically stretched
          to give the best match to the individual spectra.
      enoise - nframes x ndisp array of sqrt(counts) noise estimates.

    NOTE(review): disp_bound_ixs uses a mutable default and is indexed
    unconditionally below, so the default [] raises IndexError -- callers
    must always pass a 2-element [ix0, ix1] pair; confirm intended.
    """
    frame_axis = 0 # TODO = this should call the object property
    disp_axis = 1 # TODO = this should call the object property
    nframes, ndisp = np.shape( spectra ) # TODO = this should call the object property

    # Convert smoothing fwhm to the standard deviation of the
    # Gaussian kernel, and smooth the reference spectrum:
    if smoothing_fwhm!=None:
        smoothing_sig = smoothing_fwhm/2./np.sqrt( 2.*np.log( 2. ) )
        ref_spectrum = scipy.ndimage.filters.gaussian_filter1d( ref_spectrum, smoothing_sig )
    else:
        smoothing_sig = None

    # Interpolate the reference spectrum on to a grid of
    # increments equal to the dwav shift increment:
    dwavs = np.r_[-max_wavshift:max_wavshift+dwav:dwav]
    nshifts = len( dwavs )
    # Zero-pad so shifted evaluations never fall outside the grid:
    pad = max_wavshift+1
    x = np.arange( ndisp )
    xi = np.arange( -pad, ndisp+pad )
    z = np.zeros( pad )
    ref_spectrumi = np.concatenate( [ z, ref_spectrum, z ] )
    interpf = scipy.interpolate.interp1d( xi, ref_spectrumi, kind='cubic' )
    # Pre-compute every shifted version of the reference spectrum:
    shifted = np.zeros( [ nshifts, ndisp ] )
    for i in range( nshifts ):
        shifted[i,:] = interpf( x+dwavs[i] )

    # Now loop over the individual spectra and determine which
    # of the shifted reference spectra gives the best match:
    print '\nDetermining shifts and stretches:'
    wavshifts = np.zeros( nframes )
    vstretches = np.zeros( nframes )
    dspec = np.zeros( [ nframes, ndisp ] )
    enoise = np.zeros( [ nframes, ndisp ] )
    # Only this dispersion-axis sub-range contributes to the rms metric:
    ix0 = disp_bound_ixs[0]
    ix1 = disp_bound_ixs[1]
    # Design matrix for the linear fit: column 0 = offset, column 1 = the
    # shifted reference spectrum (its coefficient is the vertical stretch).
    A = np.ones([ndisp,2])
    coeffs = []  # NOTE(review): accumulated nowhere -- appears unused.
    for i in range( nframes ):
        print i+1, nframes
        rms_i = np.zeros( nshifts )
        diffs = np.zeros( [ nshifts, ndisp ] )
        vstretches_i = np.zeros( nshifts )
        for j in range( nshifts ):
            # Least-squares fit of offset + stretch * shifted_reference
            # to the observed spectrum for this candidate shift:
            A[:,1] = shifted[j,:]
            b = np.reshape( spectra[i,:], [ ndisp, 1 ] )
            res = np.linalg.lstsq( A, b )
            c = res[0].flatten()
            fit = np.dot( A, c )
            vstretches_i[j] = c[1]
            diffs[j,:] = spectra[i,:] - fit
            rms_i[j] = np.sqrt( np.mean( diffs[j,:][ix0:ix1+1]**2. ) )
        # Keep the shift whose fit residual is smallest:
        ix = np.argmin( rms_i )
        dspec[i,:] = diffs[ix,:]#/ref_spectrum
        enoise[i,:] = np.sqrt( spectra[i,:] )#/ref_spectrum
        wavshifts[i] = dwavs[ix]
        vstretches[i] = vstretches_i[ix]
        print '--> wavshift={0:.3f}, vstretch={1:.3f}'.format( dwavs[ix], vstretches_i[ix] )
        if 0:
            # Disabled interactive debugging of the rms-vs-shift curve.
            plt.ion()
            plt.figure()
            plt.plot( dwavs, rms_i, '-ok' )
            plt.axvline( dwavs[ix], c='r' )
            plt.title( wavshifts[i] )
            pdb.set_trace()
            plt.close('all')
    return dspec, wavshifts, vstretches, enoise
def extract_spatscan_spectra( image_cube, ap_radius=60, ninterp=10000, cross_axis=0, disp_axis=1, frame_axis=2 ):
    """
    GIVEN IMAGES WILL CALCULATE THE CENTER OF THE SPATIAL
    SCAN AND INTEGRATE WITHIN SPECIFIED APERTURE ABOUT
    THIS CENTER ALONG THE CROSS-DISPERSION AXIS.

    Returns (cdcs, spectra): per-frame scan-center coordinates and the
    nframes x ndisp array of aperture-integrated spectra.
    """
    z = np.shape( image_cube ) # maybe build this into object
    ncross = z[cross_axis]
    ndisp = z[disp_axis]
    nframes = z[frame_axis]
    spectra = np.zeros( [ nframes, ndisp ] )
    cdcs = np.zeros( nframes )
    x = np.arange( ncross )
    # Fine interpolation grid along the cross-dispersion axis, ninterp
    # points per original pixel:
    nf = int( ninterp*len( x ) )
    xf = np.r_[ x.min():x.max():1j*nf ]
    print '\nExtracting spectra from 2D images:'
    for i in range( nframes ):
        print '... image {0} of {1}'.format( i+1, nframes )
        image = image_cube[:,:,i]
        # Extract the cross-dispersion profile, i.e. along
        # the axis of the spatial scan:
        cdp = np.sum( image, axis=disp_axis )
        # Interpolate cross-dispersion profile to finer grid
        # in order to track sub-pixel shifts:
        cdpf = np.interp( xf, x, cdp )
        # Only consider points above the background level,
        # otherwise blank sky will bias the result:
        thresh = cdp.min() + 0.05*( cdp.max()-cdp.min() )
        ixs = ( cdpf>thresh )
        # Determine the center of the scan by taking the
        # point midway between the edges:
        cdcs[i] = np.mean( xf[ixs] )
        # Determine the cross-dispersion coordinates between
        # which the integration will be performed:
        xmin = max( [ 0, cdcs[i] - ap_radius ] )
        xmax = min( [ cdcs[i] + ap_radius, ncross-1 ] )
        # Determine the rows that are fully contained
        # within the aperture and integrate along the
        # cross-dispersion axis:
        xmin_full = int( np.ceil( xmin ) )
        xmax_full = int( np.floor( xmax ) )
        ixs_full = ( x>=xmin_full )*( x<=xmax_full )
        spectra[i,:] = np.sum( image[ixs_full,:], axis=cross_axis )
        # Determine any rows that are partially contained
        # within the aperture at either end of the scan and
        # add their weighted contributions to the spectrum:
        # NOTE(review): the partial weights are applied to the adjacent
        # fully-included rows (xmin_full and xmax_full+1) rather than the
        # partially-covered rows themselves -- confirm this proxy is
        # intended.
        if xmin_full>0:
            xlow_partial = xmin_full - xmin
            spectra[i,:] += xlow_partial*image[xmin_full,:]
        if xmax_full+1<ncross:
            xupp_partial = xmax - xmax_full
            spectra[i,:] += xupp_partial*image[xmax_full+1,:]
    return cdcs, spectra
| tomevans/spectroscopy | spectroscopy/wfc3_routines_BACKUP.py | Python | gpl-2.0 | 6,854 | [
"Gaussian"
] | cc6fa403e2797216932bebb0cd30ab8da4d3357d05d2b3813f2fd255e36802cd |
from __future__ import print_function
import string
import sys
from collections import deque
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils import check_random_state
from sklearn.utils.extmath import logsumexp
from . import _hmmc
from .utils import normalize
# Decoders accepted by _BaseHMM.decode()/predict().
decoder_algorithms = frozenset(("viterbi", "map"))

# Log-probability floor used to represent "effectively impossible".
ZEROLOGPROB = -1e200
# Double-precision machine epsilon.
EPS = np.finfo(float).eps
NEGINF = -np.inf
class ConvergenceMonitor(object):
    """Monitors and reports convergence to :data:`sys.stderr`.

    Parameters
    ----------
    thresh : double
        Convergence threshold. The algorithm has converged either if
        the maximum number of iterations is reached or the log probability
        improvement between the two consecutive iterations is less than
        threshold.

    n_iter : int
        Maximum number of iterations to perform.

    verbose : bool
        If ``True`` then per-iteration convergence reports are printed,
        otherwise the monitor is mute.

    Attributes
    ----------
    history : deque
        The log probability of the data for the last two training
        iterations. If the values are not strictly increasing, the
        model did not converge.

    iter : int
        Number of iterations performed while training the model.
    """

    # Column layout: iteration, log-probability, improvement over the
    # previous iteration.
    fmt = "{iter:>10d} {logprob:>16.4f} {delta:>+16.4f}"

    def __init__(self, thresh, n_iter, verbose):
        self.thresh = thresh
        self.n_iter = n_iter
        self.verbose = verbose
        self.history = deque(maxlen=2)
        self.iter = 1

    def report(self, logprob):
        """Record *logprob* for the current iteration, optionally printing
        a one-line progress report to stderr."""
        if self.verbose and self.history:
            message = self.fmt.format(
                iter=self.iter, logprob=logprob,
                delta=logprob - self.history[-1])
            print(message, file=sys.stderr)

        self.history.append(logprob)
        self.iter += 1

    @property
    def converged(self):
        """True once the iteration budget is spent or the most recent
        improvement fell below the threshold."""
        if self.iter == self.n_iter:
            return True
        return (len(self.history) == 2 and
                self.history[1] - self.history[0] < self.thresh)
class _BaseHMM(BaseEstimator):
    """Hidden Markov Model base class.

    Representation of a hidden Markov model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a HMM.

    See the instance documentation for details specific to a
    particular object.

    Attributes
    ----------
    n_components : int
        Number of states in the model.

    transmat : array, shape (`n_components`, `n_components`)
        Matrix of transition probabilities between states.

    startprob : array, shape ('n_components`,)
        Initial state occupation distribution.

    transmat_prior : array, shape (`n_components`, `n_components`)
        Matrix of prior transition probabilities between states.

    startprob_prior : array, shape ('n_components`,)
        Initial state occupation prior distribution.

    algorithm : string, one of the decoder_algorithms
        Decoder algorithm.

    random_state: RandomState or an int seed (0 by default)
        A random number generator instance

    n_iter : int, optional
        Maximum number of iterations to perform.

    thresh : float, optional
        Convergence threshold.

    verbose : bool, optional
        When ``True`` per-iteration convergence reports are printed
        to :data:`sys.stderr`. You can diagnose convergence via the
        :attr:`monitor_` attribute.

    params : string, optional
        Controls which parameters are updated in the training
        process. Can contain any combination of 's' for startprob,
        't' for transmat, and other characters for subclass-specific
        emission parameters. Defaults to all parameters.

    init_params : string, optional
        Controls which parameters are initialized prior to
        training. Can contain any combination of 's' for
        startprob, 't' for transmat, and other characters for
        subclass-specific emission parameters. Defaults to all
        parameters.

    See Also
    --------
    GMM : Gaussian mixture model
    """

    # This class implements the public interface to all HMMs that
    # derive from it, including all of the machinery for the
    # forward-backward and Viterbi algorithms. Subclasses need only
    # implement _generate_sample_from_state(), _compute_log_likelihood(),
    # _init(), _initialize_sufficient_statistics(),
    # _accumulate_sufficient_statistics(), and _do_mstep(), all of
    # which depend on the specific emission distribution.
    #
    # Subclasses will probably also want to implement properties for
    # the emission distribution parameters to expose them publicly.
    def __init__(self, n_components=1, startprob=None, transmat=None,
                 startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", random_state=None,
                 n_iter=10, thresh=1e-2, verbose=False,
                 params=string.ascii_letters,
                 init_params=string.ascii_letters):
        # TODO: move all validation from descriptors to 'fit' and 'predict'.
        self.n_components = n_components
        self.n_iter = n_iter
        self.thresh = thresh
        self.monitor_ = ConvergenceMonitor(thresh, n_iter, verbose)
        self.params = params
        self.init_params = init_params
        # The trailing-underscore assignments below go through the property
        # setters defined later in this class, which validate the input and
        # store log-probabilities internally.
        self.startprob_ = startprob
        self.startprob_prior = startprob_prior
        self.transmat_ = transmat
        self.transmat_prior = transmat_prior
        self.algorithm = algorithm
        self.random_state = random_state
        # Masks remember which entries of the user-supplied matrices were
        # (numerically) non-zero; they stay None for the default uniform
        # initialization.
        self.transmat_mask = None
        self.startprob_mask = None
        if transmat is not None:
            self.transmat_mask = (transmat > np.finfo(float).eps)
        if startprob is not None:
            self.startprob_mask = (startprob > np.finfo(float).eps)
    def eval(self, X):
        # Alias for score_samples().
        return self.score_samples(X)
    def score_samples(self, obs):
        """Compute the log probability under the model and compute posteriors.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        logprob : float
            Log likelihood of the sequence ``obs``.

        posteriors : array_like, shape (n, n_components)
            Posterior probabilities of each state for each
            observation

        See Also
        --------
        score : Compute the log probability under the model
        decode : Find most likely state sequence corresponding to a `obs`
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        logprob, fwdlattice = self._do_forward_pass(framelogprob)
        bwdlattice = self._do_backward_pass(framelogprob)
        # Unnormalized per-frame state log-posteriors (forward * backward).
        gamma = fwdlattice + bwdlattice
        # gamma is guaranteed to be correctly normalized by logprob at
        # all frames, unless we do approximate inference using pruning.
        # So, we will normalize each frame explicitly in case we
        # pruned too aggressively.
        posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
        # Guard against exact zeros before the explicit renormalization.
        posteriors += np.finfo(np.float64).eps
        posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1))
        return logprob, posteriors
    def score(self, obs):
        """Compute the log probability under the model.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        logprob : float
            Log likelihood of the ``obs``.

        See Also
        --------
        score_samples : Compute the log probability under the model and
            posteriors

        decode : Find most likely state sequence corresponding to a `obs`
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        # Only the forward pass is needed for the sequence likelihood.
        logprob, _ = self._do_forward_pass(framelogprob)
        return logprob
    def _decode_viterbi(self, obs):
        """Find most likely state sequence corresponding to ``obs``.
        Uses the Viterbi algorithm.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        viterbi_logprob : float
            Log probability of the maximum likelihood path through the HMM.

        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation.

        See Also
        --------
        score_samples : Compute the log probability under the model and
            posteriors.
        score : Compute the log probability under the model
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        # The dynamic-programming pass itself is delegated to the helper.
        viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob)
        return viterbi_logprob, state_sequence
    def _decode_map(self, obs):
        """Find most likely state sequence corresponding to `obs`.
        Uses the maximum a posteriori estimation.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        map_logprob : float
            Log probability of the maximum likelihood path through the HMM

        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation

        See Also
        --------
        score_samples : Compute the log probability under the model and
            posteriors.
        score : Compute the log probability under the model.
        """
        _, posteriors = self.score_samples(obs)
        # Pick the individually most probable state at each time step.
        state_sequence = np.argmax(posteriors, axis=1)
        map_logprob = np.max(posteriors, axis=1).sum()
        return map_logprob, state_sequence
def decode(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to ``obs``.
Uses the selected algorithm for decoding.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
algorithm : string, one of the `decoder_algorithms`
decoder algorithm to be used
Returns
-------
logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
if self.algorithm in decoder_algorithms:
algorithm = self.algorithm
elif algorithm in decoder_algorithms:
algorithm = algorithm
decoder = {"viterbi": self._decode_viterbi,
"map": self._decode_map}
logprob, state_sequence = decoder[algorithm](obs)
return logprob, state_sequence
def predict(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to `obs`.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
"""
_, state_sequence = self.decode(obs, algorithm)
return state_sequence
def predict_proba(self, obs):
"""Compute the posterior probability for each state in the model
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
T : array-like, shape (n, n_components)
Returns the probability of the sample for each state in the model.
"""
_, posteriors = self.score_samples(obs)
return posteriors
    def sample(self, n=1, random_state=None):
        """Generate random samples from the model.

        Parameters
        ----------
        n : int
            Number of samples to generate.

        random_state: RandomState or an int seed (0 by default)
            A random number generator instance. If None is given, the
            object's random_state is used

        Returns
        -------
        (obs, hidden_states)
        obs : array_like, length `n` List of samples
        hidden_states : array_like, length `n` List of hidden states
        """
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)

        # Cumulative distributions let us sample a state as the index of
        # the first CDF entry exceeding a uniform draw.
        startprob_pdf = self.startprob_
        startprob_cdf = np.cumsum(startprob_pdf)
        transmat_pdf = self.transmat_
        transmat_cdf = np.cumsum(transmat_pdf, 1)

        # Initial state.
        rand = random_state.rand()
        currstate = (startprob_cdf > rand).argmax()

        hidden_states = [currstate]
        obs = [self._generate_sample_from_state(
            currstate, random_state=random_state)]

        # Remaining n-1 samples: transition, then emit.
        for _ in range(n - 1):
            rand = random_state.rand()
            currstate = (transmat_cdf[currstate] > rand).argmax()
            hidden_states.append(currstate)
            obs.append(self._generate_sample_from_state(
                currstate, random_state=random_state))

        return np.array(obs), np.array(hidden_states, dtype=int)
    def fit(self, obs):
        """Estimate model parameters.

        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, pass proper
        ``init_params`` keyword argument to estimator's constructor.

        Parameters
        ----------
        obs : list
            List of array-like observation sequences, each of which
            has shape (n_i, n_features), where n_i is the length of
            the i_th observation.
        """
        self._init(obs, self.init_params)

        # Standard EM loop: the E-step accumulates sufficient statistics
        # over all sequences, the M-step re-estimates parameters from them.
        for i in range(self.n_iter):
            stats = self._initialize_sufficient_statistics()
            curr_logprob = 0
            for seq in obs:
                framelogprob = self._compute_log_likelihood(seq)
                lpr, fwdlattice = self._do_forward_pass(framelogprob)
                bwdlattice = self._do_backward_pass(framelogprob)
                # Per-frame state posteriors from the two lattices.
                gamma = fwdlattice + bwdlattice
                posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
                curr_logprob += lpr
                self._accumulate_sufficient_statistics(
                    stats, seq, framelogprob, posteriors, fwdlattice,
                    bwdlattice, self.params)

            # Stop early once the log-likelihood improvement drops below
            # the threshold (see ConvergenceMonitor).
            self.monitor_.report(curr_logprob)
            if self.monitor_.converged:
                break

            self._do_mstep(stats, self.params)

        return self
    def _get_algorithm(self):
        """Return the name of the decoder algorithm in use."""
        return self._algorithm
    def _set_algorithm(self, algorithm):
        # Validate against the module-level whitelist of supported decoders.
        if algorithm not in decoder_algorithms:
            raise ValueError("algorithm must be one of the decoder_algorithms")
        self._algorithm = algorithm
    # Public read/write accessor; assignment goes through the validation above.
    algorithm = property(_get_algorithm, _set_algorithm)
def _get_startprob(self):
"""Mixing startprob for each state."""
return np.exp(self._log_startprob)
def _set_startprob(self, startprob):
if startprob is None:
startprob = np.tile(1.0 / self.n_components, self.n_components)
else:
startprob = np.asarray(startprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(startprob):
normalize(startprob)
if len(startprob) != self.n_components:
raise ValueError('startprob must have length n_components')
if not np.allclose(np.sum(startprob), 1.0):
raise ValueError('startprob must sum to 1.0')
self._log_startprob = np.log(np.asarray(startprob).copy())
startprob_ = property(_get_startprob, _set_startprob)
def _get_transmat(self):
"""Matrix of transition probabilities."""
return np.exp(self._log_transmat)
def _set_transmat(self, transmat):
if transmat is None:
transmat = np.tile(1.0 / self.n_components,
(self.n_components, self.n_components))
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(transmat):
normalize(transmat, axis=1)
if (np.asarray(transmat).shape
!= (self.n_components, self.n_components)):
raise ValueError('transmat must have shape '
'(n_components, n_components)')
if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)):
raise ValueError('Rows of transmat must sum to 1.0')
self._log_transmat = np.log(np.asarray(transmat).copy())
underflow_idx = np.isnan(self._log_transmat)
self._log_transmat[underflow_idx] = NEGINF
transmat_ = property(_get_transmat, _set_transmat)
    def _do_viterbi_pass(self, framelogprob):
        """Run the C Viterbi decoder; return (logprob, best state sequence)."""
        n_observations, n_components = framelogprob.shape
        state_sequence, logprob = _hmmc._viterbi(
            n_observations, n_components, self._log_startprob,
            self._log_transmat, framelogprob)
        return logprob, state_sequence
    def _do_forward_pass(self, framelogprob):
        """Forward recursion (C extension); return (log P(obs), alpha lattice)."""
        n_observations, n_components = framelogprob.shape
        fwdlattice = np.zeros((n_observations, n_components))
        # _hmmc._forward fills ``fwdlattice`` in place with log-alpha values.
        _hmmc._forward(n_observations, n_components, self._log_startprob,
                       self._log_transmat, framelogprob, fwdlattice)
        # Total sequence log-likelihood is the log-sum over final-step alphas.
        return logsumexp(fwdlattice[-1]), fwdlattice
    def _do_backward_pass(self, framelogprob):
        """Backward recursion (C extension); return the log-beta lattice."""
        n_observations, n_components = framelogprob.shape
        bwdlattice = np.zeros((n_observations, n_components))
        # _hmmc._backward fills ``bwdlattice`` in place with log-beta values.
        _hmmc._backward(n_observations, n_components, self._log_startprob,
                        self._log_transmat, framelogprob, bwdlattice)
        return bwdlattice
    def _compute_log_likelihood(self, obs):
        """Subclass hook: return per-frame emission log-likelihoods.

        Expected shape is (n_observations, n_components) — see how the
        result is consumed in ``fit``.
        """
        pass
    def _generate_sample_from_state(self, state, random_state=None):
        """Subclass hook: draw one observation from ``state``'s emission model."""
        pass
def _init(self, obs, params):
if 's' in params:
self.startprob_.fill(1.0 / self.n_components)
if 't' in params:
self.transmat_.fill(1.0 / self.n_components)
# Methods used by self.fit()
def _initialize_sufficient_statistics(self):
stats = {'nobs': 0,
'start': np.zeros(self.n_components),
'trans': np.zeros((self.n_components, self.n_components))}
return stats
    def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        """Fold one sequence's E-step quantities into ``stats`` in place."""
        stats['nobs'] += 1
        if 's' in params:
            # Expected initial-state counts: the posterior at t=0.
            stats['start'] += posteriors[0]
        if 't' in params:
            n_observations, n_components = framelogprob.shape
            # when the sample is of length 1, it contains no transitions
            # so there is no reason to update our trans. matrix estimate
            if n_observations <= 1:
                return
            # log-xi: joint posterior of (state_t, state_{t+1}), computed in C.
            lneta = np.zeros((n_observations - 1, n_components, n_components))
            _hmmc._compute_lneta(n_observations, n_components, fwdlattice,
                                 self._log_transmat, bwdlattice, framelogprob,
                                 lneta)
            # Expected transition counts: sum of xi over time steps.
            stats['trans'] += np.exp(logsumexp(lneta, axis=0))
    def _do_mstep(self, stats, params):
        """M-step: re-estimate startprob/transmat from accumulated ``stats``.

        The priors act as Dirichlet-style pseudo-counts (``prior - 1.0``).
        Note this lazily replaces ``None`` priors with 1.0 on ``self``.
        """
        # Based on Huang, Acero, Hon, "Spoken Language Processing",
        # p. 443 - 445
        if self.startprob_prior is None:
            self.startprob_prior = 1.0
        if self.transmat_prior is None:
            self.transmat_prior = 1.0
        if 's' in params:
            self.startprob_ = normalize(self.startprob_prior - 1.0 + stats['start'], mask=self.startprob_mask)
        if 't' in params:
            transmat_ = normalize(self.transmat_prior - 1.0 + stats['trans'], axis=1, mask=self.transmat_mask)
            self.transmat_ = transmat_
| matthiasplappert/hmmlearn | hmmlearn/base.py | Python | bsd-3-clause | 21,078 | [
"Gaussian"
] | 809af31c57a4fcbccae2fbf59eda6df1e048a7b4ebfae43df2dcb2b52356d688 |
import time
import hashlib
from flask import redirect, flash, url_for, request
from flask.ext.admin import BaseView, expose
from flask.ext.login import current_user
from application import app
def generate_whmcs_url(goto=None):
    """Create an AutoAuth link that logs the current user into WHMCS.

    http://docs.whmcs.com/AutoAuth
    This is helpful because WHMCS doesn't have LDAP support.

    :param goto: optional WHMCS page (e.g. ``"clientarea.php"``) to land on.
    :returns: the fully-formed AutoAuth URL as a string.
    """
    timestamp = str(int(time.time()))
    whmcs_url = app.config['WHMCS_URL']
    key = app.config['AUTO_AUTH_KEY'] # set in WHMCS config
    # hashlib requires a bytes-like input under Python 3; encoding is
    # harmless on Python 2 for the ASCII data involved here.  Also avoid
    # shadowing the ``hash`` builtin.
    digest = hashlib.sha1(
        (current_user.email + timestamp + key).encode('utf-8'))
    hash_string = digest.hexdigest()
    url = "%s?email=%s&timestamp=%s&hash=%s&goto=%s" % (
        whmcs_url, current_user.email, timestamp, hash_string, goto
    )
    return url
class BillingRedirect(BaseView):
    """Admin view that bounces authenticated users to their WHMCS billing area."""
    def is_accessible(self):
        # Flask-Login: only expose this view to logged-in users.
        return current_user.is_authenticated()
    # redirect to login if not authenticated
    def _handle_view(self, name, **kwargs):
        if not self.is_accessible():
            flash('Please log in to access this page.', 'error')
            return redirect(url_for('login.index', next=request.url))
    @expose('/')
    def index(self):
        # Land the user directly on the WHMCS client area via AutoAuth.
        return redirect(generate_whmcs_url("clientarea.php"))
| pawl/MakerManager2 | application/views/billing_redirect.py | Python | mit | 1,276 | [
"VisIt"
] | 00ee55f93c87f13097d5a88b8dd3f0fdd97f5bbc610989b5a98299b081a235ed |
# Copyright 2005-2008 by Frank Kauff & Cymon J. Cox. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Nodes.py
#
# Provides functionality of a linked list.
# Each node has one (or none) predecessor, and an arbitrary number of successors.
# Nodes can store arbitrary data in a NodeData class.
#
# Subclassed by Nexus.Trees to store phylogenetic trees.
#
# Bug reports to Frank Kauff (fkauff@biologie.uni-kl.de)
#
class ChainException(Exception):
    """Raised by Chain operations that reference unknown node ids or links."""
    pass
class NodeException(Exception):
    """Raised by Node operations (e.g. reassigning an id, bad successor list)."""
    pass
class Chain:
    """Stores a list of nodes that are linked together.

    Nodes are kept in ``self.chain`` keyed by integer ids handed out
    sequentially starting at 0.
    """
    def __init__(self):
        """Initiates a node chain: (self)."""
        self.chain = {}
        self.id = -1
    def _get_id(self):
        """Gets a new id for a node in the chain."""
        self.id += 1
        return self.id
    def all_ids(self):
        """Return a list of all node ids."""
        return self.chain.keys()
    def add(self, node, prev=None):
        """Attaches node to another: (self, node, prev)."""
        if prev is not None and prev not in self.chain:
            raise ChainException('Unknown predecessor: ' + str(prev))
        else:
            # Renamed from ``id`` to avoid shadowing the builtin.
            node_id = self._get_id()
            node.set_id(node_id)
            node.set_prev(prev)
            if prev is not None:
                self.chain[prev].add_succ(node_id)
            self.chain[node_id] = node
        return node_id
    def collapse(self, id):
        """Deletes node from chain and relinks successors to predecessor: collapse(self, id)."""
        if id not in self.chain:
            raise ChainException('Unknown ID: ' + str(id))
        prev_id = self.chain[id].get_prev()
        self.chain[prev_id].remove_succ(id)
        succ_ids = self.chain[id].get_succ()
        # Re-parent all children onto the removed node's predecessor.
        for i in succ_ids:
            self.chain[i].set_prev(prev_id)
        self.chain[prev_id].add_succ(succ_ids)
        node = self.chain[id]
        self.kill(id)
        return node
    def kill(self, id):
        """Kills a node from chain without caring to what it is connected: kill(self,id)."""
        if id not in self.chain:
            raise ChainException('Unknown ID: ' + str(id))
        else:
            del self.chain[id]
    def unlink(self, id):
        """Disconnects node from his predecessor: unlink(self,id)."""
        if id not in self.chain:
            raise ChainException('Unknown ID: ' + str(id))
        else:
            prev_id = self.chain[id].prev
            if prev_id is not None:
                self.chain[prev_id].succ.pop(self.chain[prev_id].succ.index(id))
            self.chain[id].prev = None
            return prev_id
    def link(self, parent, child):
        """Connects son to parent: link(self,son,parent)."""
        if child not in self.chain:
            raise ChainException('Unknown ID: ' + str(child))
        elif parent not in self.chain:
            raise ChainException('Unknown ID: ' + str(parent))
        else:
            self.unlink(child)
            self.chain[parent].succ.append(child)
            self.chain[child].set_prev(parent)
    def is_parent_of(self, parent, grandchild):
        """Check if grandchild is a subnode of parent: is_parent_of(self,parent,grandchild)."""
        if grandchild == parent or grandchild in self.chain[parent].get_succ():
            return True
        # BUGFIX: the previous code returned False as soon as the *first*
        # successor's subtree did not contain ``grandchild``, so descendants
        # living under any later sibling were never found.  Every subtree
        # must be searched before concluding False.
        for sn in self.chain[parent].get_succ():
            if self.is_parent_of(sn, grandchild):
                return True
        return False
    def trace(self, start, finish):
        """Returns a list of all node_ids between two nodes (excluding start, including end): trace(start,end)."""
        if start not in self.chain or finish not in self.chain:
            raise NodeException('Unknown node.')
        if not self.is_parent_of(start, finish) or start == finish:
            return []
        # Given the guard above, exactly one child subtree contains finish.
        for sn in self.chain[start].get_succ():
            if self.is_parent_of(sn, finish):
                return [sn] + self.trace(sn, finish)
class Node:
    """A single node of a chain.

    Holds an id, an arbitrary payload, one predecessor id and a list of
    successor ids.
    """
    def __init__(self, data=None):
        """Create an unattached node carrying ``data``."""
        self.id = None
        self.data = data
        self.prev = None
        self.succ = []
    def set_id(self, id):
        """Assign the node id; an id may be set only once."""
        if self.id is not None:
            raise NodeException('Node id cannot be changed.')
        self.id = id
    def get_id(self):
        """Return this node's id (None while unattached)."""
        return self.id
    def get_succ(self):
        """Return the list of successor ids."""
        return self.succ
    def get_prev(self):
        """Return the predecessor id."""
        return self.prev
    def add_succ(self, id):
        """Append one id — or every id in a list — to the successors."""
        additions = id if isinstance(id, list) else [id]
        # In-place extension: other code may hold references to this list.
        self.succ += additions
    def remove_succ(self, id):
        """Drop ``id`` from the successors (ValueError if absent)."""
        self.succ.remove(id)
    def set_succ(self, new_succ):
        """Replace the whole successor list; must be a plain list."""
        if not isinstance(new_succ, list):
            raise NodeException('Node successor must be of list type.')
        self.succ = new_succ
    def set_prev(self, id):
        """Set the predecessor id."""
        self.prev = id
    def get_data(self):
        """Return the node's payload."""
        return self.data
    def set_data(self, data):
        """Replace the node's payload."""
        self.data = data
| BlogomaticProject/Blogomatic | opt/blog-o-matic/usr/lib/python/Bio/Nexus/Nodes.py | Python | gpl-2.0 | 5,759 | [
"Biopython"
] | b453174e4f47d573ca4be825b4e6cb63a3dd452d02c61439562fd11ddb445dfd |
# -*- coding: utf-8 -*-
import typing as T
import moose._moose as _moose
# Clock tick reserved for recording tables, and the MOOSE path under which
# they are created.
_tick = 8
_base = "/_utils"
_path = _base + "/y{0}"
# Monotonic counter used to build unique table paths.
_counter = 0
# BUGFIX: the annotation must be a string (forward reference) because
# ``_Plot`` is defined further down this module; a bare ``T.List[_Plot]``
# annotation on a module-level assignment is evaluated eagerly and raises
# NameError on import.
_plots: T.List["_Plot"] = []
_moose.Neutral(_base)
# Maps each recordable MOOSE element class to the field recorded when the
# caller does not name one explicitly (see ``record``/``_defaultField``).
_defaultFields = {
    _moose.Compartment: "Vm",
    _moose.ZombieCompartment: "Vm",
    _moose.HHChannel: "Gk",
    _moose.ZombieHHChannel: "Gk",
    _moose.HHChannel2D: "Gk",
    _moose.SynChan: "Gk",
    _moose.CaConc: "Ca",
    _moose.ZombieCaConc: "Ca",
    _moose.Pool: "conc",
    _moose.ZombiePool: "conc",
    _moose.ZPool: "conc",
    _moose.BufPool: "conc",
    _moose.ZombieBufPool: "conc",
    _moose.ZBufPool: "conc",
    _moose.FuncPool: "conc",
    _moose.ZombieFuncPool: "conc",
    _moose.ZFuncPool: "conc",
}
def _defaultField(obj):
    """Return the default recorded field for ``obj``'s class (KeyError if unknown)."""
    return _defaultFields[type(obj)]
def setDt(dt):
    """Sets the time-step for recording values.

    Arguments
    ---------
    dt: Time-step for recording values.

    Returns
    -------
    Nothing."""
    # All recording tables are processed on the shared ``_tick`` clock.
    _moose.setClock(_tick, dt)
class SetupError(Exception):
    """Raised when recording utilities are used before any plot is set up."""
    pass
def _time(npoints=None):
    """Return ``npoints`` evenly spaced time values spanning the simulation.

    If ``npoints`` is omitted, the length of the first recorded table is
    used, so at least one plot must already exist.
    """
    import numpy
    if npoints is None:
        try:
            npoints = len(_plots[0].vec)
        except IndexError:
            raise SetupError(
                "List of time-points cannot be constructed because "
                "no plots have been set up yet."
            )
    begin = 0.0
    # Current simulation time according to the global MOOSE clock.
    end = _moose.Clock("/clock").currentTime
    return numpy.linspace(begin, end, npoints)
class _Plot(_moose.Table):
    """A MOOSE table plus the element/field it records and a display label."""
    def __init__(self, path, obj, field, label):
        _moose.Table.__init__(self, path)
        # Keep an explicit handle on the table to read recorded values back.
        self._table = _moose.Table(path)
        self.obj = obj      # element being recorded
        self.field = field  # name of the recorded field
        self.label = label  # optional user-supplied label override
    @property
    def values(self):
        # Recorded samples as stored by MOOSE.
        return self._table.vec
    @property
    def size(self):
        # Number of recorded samples.
        return len(self.values)
    @property
    def time(self):
        # Time axis matching ``values``.
        return _time(self.size)
    def __iter__(self):
        return iter(self.values)
def record(obj, field=None, label=None):
    """Set up recording of ``field`` on ``obj`` and return the new plot.

    ``obj`` may be an element, a path string, or an iterable of either (in
    which case a list of plots is returned).  When ``field`` is omitted, the
    class-specific default from ``_defaultFields`` is used.
    """
    global _counter
    # BUGFIX: strings are iterable in Python 3, so a path string must be
    # resolved to an element *before* the generic-iterable check; the old
    # order recursed into the string character by character.
    if isinstance(obj, str):
        obj = _moose.element(obj)
    elif hasattr(obj, "__iter__"):
        # A list/tuple of objects: record each one.
        return [record(o, field, label) for o in obj]
    if field is None:
        field = _defaultField(obj)
    path = _path.format(_counter)
    _counter += 1
    p = _Plot(path, obj, field, label)
    _plots.append(p)
    # The table pulls ``field`` from ``obj`` on every tick of the recording clock.
    _moose.connect(p, "requestData", obj, "get_" + field)
    _moose.useClock(_tick, path, "process")
    return p
def _label(plot, labelFormat="{path}.{field}"):
# Over-ride label format if label has been given explicitly.
if plot.label:
labelFormat = plot.label
return labelFormat.format(path=plot.obj.path, name=plot.obj.name, field=plot.field)
def _selectedPlots(selected):
    """Normalize a selection argument to a list of plots.

    ``None`` means all recorded plots, a single ``_Plot`` is wrapped in a
    list, and any other value is assumed to already be a list of plots.
    """
    if selected is None:
        # Returning a copy of this list, instead of reference. The returned
        # list will be manipulated later.
        return _plots[:]
    elif isinstance(selected, _Plot):
        return [selected]
    else:
        return selected
def saveCSV(
    fileName,
    selected=None,
    delimiter="\t",
    header=True,
    headerCommentCharacter="#",
    labelFormat="{path}.{field}",
    timeCol=True,
    timeHeader="Time",
    fileMode="w",
):
    """Write the selected plots to ``fileName`` as delimited columns.

    One column per plot (plus an optional leading time column); the header
    row, if requested, is prefixed with ``headerCommentCharacter``.
    """
    import csv
    plots = _selectedPlots(selected)
    if header:
        # Rebind ``header`` from flag to the actual list of column titles.
        header = []
        if timeCol:
            header.append(timeHeader)
        for plot in plots:
            header.append(_label(plot, labelFormat))
        # NOTE(review): assumes at least one column exists; with no plots
        # and timeCol=False this raises IndexError.
        header[0] = headerCommentCharacter + header[0]
    if timeCol:
        # Prepend the time axis so rows zip up as (t, v1, v2, ...).
        plots.insert(0, _time())
    with open(fileName, fileMode) as fout:
        writer = csv.writer(fout, delimiter=delimiter)
        if header:
            writer.writerow(header)
        writer.writerows(list(zip(*plots)))
def saveXPLOT(fileName, selected=None, labelFormat="{path}.{field}", fileMode="w"):
    """Write the selected plots to ``fileName`` in xplot format.

    Each plot becomes a ``/newplot`` block with a ``/plotname`` header
    followed by one recorded value per line.
    """
    plots = _selectedPlots(selected)
    with open(fileName, fileMode) as fout:
        write = lambda line: fout.write(line + "\n")
        for (i, plot) in enumerate(plots):
            label = "/plotname " + _label(plot, labelFormat)
            # Separate consecutive plot blocks with a blank line.
            if i > 0:
                write("")
            write("/newplot")
            write(label)
            for value in plot:
                write(str(value))
def show(
    selected=None,
    combine=True,
    labelFormat="{path}.{field}",
    xLabel="Time (s)",
    yLabel="{field}",
):
    """Plot the selected recordings with matplotlib (warn-and-return if absent).

    With ``combine=True`` all plots share a single figure; otherwise each
    plot gets its own.  ``xLabel``/``yLabel`` are accepted but currently
    unused by the body.
    """
    try:
        from matplotlib import pyplot as plt
    except ImportError:
        print("Warning: recording.show(): Cannot find 'matplotlib'. Not showing plots.")
        return
    plots = _selectedPlots(selected)
    if combine:
        plt.figure()
    for plot in plots:
        if not combine:
            plt.figure()
        print(_label(plot))
        plt.plot(plot.time, plot.values, label=_label(plot))
    plt.legend()
    plt.show()
def HDF5():
    """Placeholder for HDF5 export; not implemented yet."""
    pass
| dilawar/moose-core | python/moose/recording.py | Python | gpl-3.0 | 5,054 | [
"MOOSE"
] | bf29287c44cfceefb81897b8192ce41efd2da39d37f6351cb72b0e0fd0842679 |
import galaxy.model
from base.twilltestcase import TwillTestCase
from base.test_db_util import (
get_user,
get_private_role,
get_latest_history_for_user,
get_default_history_permissions_by_history,
get_latest_dataset,
refresh,
get_default_user_permissions_by_user,
get_dataset_permissions_by_dataset,
)
# Module-level handles on the fixture users and their private roles;
# populated by test_000_initiate_users and reused by the later ordered tests.
regular_user1 = regular_user2 = regular_user3 = None
admin_user = None
admin_user_private_role = regular_user1_private_role = None
regular_user2_private_role = None
class TestDataSecurity( TwillTestCase ):
    """Ordered Twill tests covering default dataset/history permission behavior."""
    def test_000_initiate_users( self ):
        """Ensuring all required user accounts exist"""
        # Each login implicitly creates the account (and its private role)
        # if it does not exist yet; the handles are stashed in module globals.
        self.logout()
        self.login( email='test1@bx.psu.edu', username='regular-user1' )
        global regular_user1
        regular_user1 = get_user( 'test1@bx.psu.edu' )
        assert regular_user1 is not None, 'Problem retrieving user with email "test1@bx.psu.edu" from the database'
        global regular_user1_private_role
        regular_user1_private_role = get_private_role( regular_user1 )
        self.logout()
        self.login( email='test2@bx.psu.edu', username='regular-user2' )
        global regular_user2
        regular_user2 = get_user( 'test2@bx.psu.edu' )
        assert regular_user2 is not None, 'Problem retrieving user with email "test2@bx.psu.edu" from the database'
        global regular_user2_private_role
        regular_user2_private_role = get_private_role( regular_user2 )
        self.logout()
        self.login( email='test3@bx.psu.edu', username='regular-user3' )
        global regular_user3
        regular_user3 = get_user( 'test3@bx.psu.edu' )
        assert regular_user3 is not None, 'Problem retrieving user with email "test3@bx.psu.edu" from the database'
        global regular_user3_private_role
        regular_user3_private_role = get_private_role( regular_user3 )
        self.logout()
        self.login( email='test@bx.psu.edu', username='admin-user' )
        global admin_user
        admin_user = get_user( 'test@bx.psu.edu' )
        assert admin_user is not None, 'Problem retrieving user with email "test@bx.psu.edu" from the database'
        global admin_user_private_role
        admin_user_private_role = get_private_role( admin_user )
    def test_005_default_permissions( self ):
        """Testing initial settings for DefaultUserPermissions and DefaultHistoryPermissions"""
        # Logged in as admin_user
        # Make sure DefaultUserPermissions are correct
        dups = get_default_user_permissions_by_user( admin_user )
        if len( dups ) > 1:
            raise AssertionError( '%d DefaultUserPermissions associated with user %s ( should be 1 )' \
                                  % ( len( admin_user.default_permissions ), admin_user.email ) )
        dup = dups[0]
        if not dup.action == galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action:
            raise AssertionError( 'The DefaultUserPermission.action for user "%s" is "%s", but it should be "%s"' \
                                  % ( admin_user.email, dup.action, galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action ) )
        # Make sure DefaultHistoryPermissions are correct
        latest_history = get_latest_history_for_user( admin_user )
        dhps = get_default_history_permissions_by_history( latest_history )
        if len( dhps ) > 1:
            raise AssertionError( '%d DefaultHistoryPermissions were created for history id %d when it was created ( should have been 1 )' \
                                  % ( len( latest_history.default_permissions ), latest_history.id ) )
        dhp = dhps[0]
        if not dhp.action == galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action:
            raise AssertionError( 'The DefaultHistoryPermission.action for history id %d is "%s", but it should be "%s"' \
                                  % ( latest_history.id, dhp.action, galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action ) )
        # The user admin page should list the admin's email.
        self.manage_roles_and_groups_for_user( self.security.encode_id( admin_user.id ),
                                               strings_displayed=[ admin_user.email ] )
        # Try deleting the admin_user's private role
        self.manage_roles_and_groups_for_user( self.security.encode_id( admin_user.id ),
                                               out_role_ids=str( admin_user_private_role.id ),
                                               strings_displayed=[ "You cannot eliminate a user's private role association." ] )
    def test_010_private_role_creation_and_default_history_permissions( self ):
        """Testing private role creation and changing DefaultHistoryPermissions for new histories"""
        # Logged in as admin_user
        self.logout()
        # Some of the history related tests here are similar to some tests in the
        # test_history_functions.py script, so we could potentially eliminate 1 or 2 of them.
        self.login( email='test1@bx.psu.edu' )
        global regular_user1
        regular_user1 = get_user( 'test1@bx.psu.edu' )
        assert regular_user1 is not None, 'Problem retrieving user with email "test1@bx.psu.edu" from the database'
        # Add a dataset to the history
        self.upload_file( '1.bed' )
        latest_dataset = get_latest_dataset()
        # Make sure DatasetPermissions are correct - default is 'manage permissions'
        dps = get_dataset_permissions_by_dataset( latest_dataset )
        if len( dps ) > 1:
            raise AssertionError( '%d DatasetPermissions were created for dataset id %d when it was created ( should have been 1 )' \
                                  % ( len( dps ), latest_dataset.id ) )
        dp = dps[0]
        if not dp.action == galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action:
            raise AssertionError( 'The DatasetPermissions.action for dataset id %d is "%s", but it should be "manage permissions"' \
                                  % ( latest_dataset.id, dp.action ) )
        # Change DefaultHistoryPermissions for regular_user1
        permissions_in = []
        actions_in = []
        for key, value in galaxy.model.Dataset.permitted_actions.items():
            # Setting the 'access' permission with the private role makes this dataset private
            permissions_in.append( key )
            actions_in.append( value.action )
        # Sort actions for later comparison
        actions_in.sort()
        self.user_set_default_permissions( permissions_in=permissions_in, role_id=str( regular_user1_private_role.id ) )
        # Make sure the default permissions are changed for new histories
        self.new_history()
        # logged in as regular_user1
        latest_history = get_latest_history_for_user( regular_user1 )
        if len( latest_history.default_permissions ) != len( actions_in ):
            raise AssertionError( '%d DefaultHistoryPermissions were created for history id %d, should have been %d' % \
                                  ( len( latest_history.default_permissions ), latest_history.id, len( actions_in ) ) )
        dhps = []
        for dhp in latest_history.default_permissions:
            dhps.append( dhp.action )
        # Sort permissions for later comparison
        dhps.sort()
        # Every permitted action must now be a default for the new history.
        for key, value in galaxy.model.Dataset.permitted_actions.items():
            if value.action not in dhps:
                raise AssertionError( '%s not in history id %d default_permissions after they were changed' % ( value.action, latest_history.id ) )
        # Add a dataset to the history
        self.upload_file( '1.bed' )
        latest_dataset = get_latest_dataset()
        # Make sure DatasetPermissions are correct
        if len( latest_dataset.actions ) != len( latest_history.default_permissions ):
            raise AssertionError( '%d DatasetPermissions were created for dataset id %d when it was created ( should have been %d )' % \
                                  ( len( latest_dataset.actions ), latest_dataset.id, len( latest_history.default_permissions ) ) )
        dps = []
        for dp in latest_dataset.actions:
            dps.append( dp.action )
        # Sort actions for later comparison
        dps.sort()
        # Compare DatasetPermissions with permissions_in - should be the same
        if dps != actions_in:
            raise AssertionError( 'DatasetPermissions "%s" for dataset id %d differ from changed default permissions "%s"' \
                                  % ( str( dps ), latest_dataset.id, str( actions_in ) ) )
        # Compare DefaultHistoryPermissions and DatasetPermissions - should be the same
        if dps != dhps:
            raise AssertionError( 'DatasetPermissions "%s" for dataset id %d differ from DefaultHistoryPermissions "%s" for history id %d' \
                                  % ( str( dps ), latest_dataset.id, str( dhps ), latest_history.id ) )
    def test_015_change_default_permissions_for_current_history( self ):
        """Testing changing DefaultHistoryPermissions for the current history"""
        # logged in a regular_user1
        self.logout()
        self.login( email=regular_user2.email )
        latest_history = get_latest_history_for_user( regular_user2 )
        self.upload_file( '1.bed' )
        latest_dataset = get_latest_dataset()
        permissions_in = [ 'DATASET_MANAGE_PERMISSIONS' ]
        # Make sure these are in sorted order for later comparison
        actions_in = [ 'manage permissions' ]
        permissions_out = [ 'DATASET_ACCESS' ]
        # Change DefaultHistoryPermissions for the current history
        self.history_set_default_permissions( permissions_out=permissions_out, permissions_in=permissions_in, role_id=str( regular_user2_private_role.id ) )
        if len( latest_history.default_permissions ) != len( actions_in ):
            raise AssertionError( '%d DefaultHistoryPermissions were created for history id %d, should have been %d' \
                                  % ( len( latest_history.default_permissions ), latest_history.id, len( permissions_in ) ) )
        # Make sure DefaultHistoryPermissions were correctly changed for the current history
        dhps = []
        for dhp in latest_history.default_permissions:
            dhps.append( dhp.action )
        # Sort permissions for later comparison
        dhps.sort()
        # Compare DefaultHistoryPermissions and actions_in - should be the same
        if dhps != actions_in:
            raise AssertionError( 'DefaultHistoryPermissions "%s" for history id %d differ from actions "%s" passed for changing' \
                                  % ( str( dhps ), latest_history.id, str( actions_in ) ) )
        # Make sure DatasetPermissionss are correct
        if len( latest_dataset.actions ) != len( latest_history.default_permissions ):
            raise AssertionError( '%d DatasetPermissionss were created for dataset id %d when it was created ( should have been %d )' \
                                  % ( len( latest_dataset.actions ), latest_dataset.id, len( latest_history.default_permissions ) ) )
        dps = []
        for dp in latest_dataset.actions:
            dps.append( dp.action )
        # Sort actions for comparison
        dps.sort()
        # Compare DatasetPermissionss and DefaultHistoryPermissions - should be the same
        if dps != dhps:
            raise AssertionError( 'DatasetPermissionss "%s" for dataset id %d differ from DefaultHistoryPermissions "%s"' \
                                  % ( str( dps ), latest_dataset.id, str( dhps ) ) )
    def test_999_reset_data_for_later_test_runs( self ):
        """Reseting data to enable later test runs to pass"""
        # Logged in as regular_user2
        self.logout()
        self.login( email=admin_user.email )
        ##################
        # Make sure all users are associated only with their private roles
        ##################
        for user in [ admin_user, regular_user1, regular_user2, regular_user3 ]:
            refresh( user )
            if len( user.roles) != 1:
                raise AssertionError( '%d UserRoleAssociations are associated with %s ( should be 1 )' % ( len( user.roles ), user.email ) )
        #####################
        # Reset DefaultHistoryPermissions for regular_user1
        #####################
        self.logout()
        self.login( email=regular_user1.email )
        # Change DefaultHistoryPermissions for regular_user1 back to the default
        permissions_in = [ 'DATASET_MANAGE_PERMISSIONS' ]
        permissions_out = [ 'DATASET_ACCESS' ]
        self.user_set_default_permissions( permissions_in=permissions_in,
                                           permissions_out=permissions_out,
                                           role_id=str( regular_user1_private_role.id ) )
        self.logout()
        self.login( email=admin_user.email )
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/test/functional/test_data_security.py | Python | gpl-3.0 | 12,947 | [
"Galaxy"
] | 36c80cddde1bcf78c14b8650233e38a7cb961778b587b0c75c2a18f18f493e41 |
"""
Imports all the functions in the various files.
Doing this explicitly for clarity
"""
from .array_help import (
find_nearest,
find_nearest_index,
copy_range,
copy_range_array,
copy_range_yarray,
sort_by_list,
find_nearest_tolerance,
sort_array_column,
split_columns,
)
from .background import subtract_background, find_background_lp_filter
from .peaks import (
find_peaks,
find_peaks_pu,
lorentzian,
gaussian,
voigt,
lorentzian_d,
lorentzian_dd,
gaussian_d,
gaussian_dd,
)
from .fitting import (
make_region_array,
make_regions,
fit_data,
fit_data_bg,
output_results,
batch_fit_single_peak,
line_fit,
exponential_fit,
exponential_fit_offset,
poly_fit,
fit_peaks,
split_and_fit,
fit_peak_table,
build_model,
set_parameters,
build_model_d,
build_model_dd,
peak_table,
)
from .convert import (
wl2wn,
wl2rwn,
wn2wl,
rwn2wl,
rwn2wn,
absorption,
nm2ev,
nm2ev_xy,
nm2ev_xyz,
)
from .file_io import (
assure_path_exists,
write2col,
getxy,
clean_file,
path,
load_folder,
quick_load_xy,
make_fname,
rpath,
read_json,
write_json,
)
from .normalize import (
normalize,
normalize_msc,
normalize_pq,
normalize_2pt,
normalize_fs,
)
from .filters import smooth_data, butter_lp_filter
from .calibrate import (
calibrate,
find_laser_wavelength,
find_best_offset,
find_best_offset2,
calibrate_x_data2,
neon_peaks,
calibrate_neon_wavenumber,
)
from .read_files import read_cary, read_craic, read_nicolet, read_horiba, read_renishaw
from .misc import remove_absorption_jumps, generate_spectrum, activity_to_intensity
from .plot import plot_peak_fit, plot_components, plot_background, plot_fit
from .filters import resample
# Need to actually make some of these work
# from filters import smooth_data, butter_lp_filter, butter_lowpass_filter, \
# butter_lowpass, wicker, savgol, butterworth_bandpass
| rohanisaac/spectra | spectra/__init__.py | Python | gpl-3.0 | 2,083 | [
"Gaussian"
] | 8c69df1f3f0627a764185c6e93c0d3be260520871af55ae7bba4db211043920c |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2014 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors :
# Luis Cañas-Díaz <lcanas@bitergia.com>
# Daniel Izquierdo Cortázar <dizquierdo@bitergia.com>
# Alvaro del Castillo San Felix <acs@bitergia.com>
#
# launch.py
#
# This script automates the execution of some of the metrics grimoire
# tools (Bicho, MLStats, CVSAnaly). It uses configuration files to get
# the parameters. Finally it execute R scripts in order to generate the
# JSON files
import logging
import os
import subprocess
import sys
import time
import distutils.dir_util
import json
import datetime as dt
from optparse import OptionGroup, OptionParser
from tempfile import NamedTemporaryFile
from ConfigParser import SafeConfigParser
import MySQLdb
# conf variables from file(see read_main_conf)
options = {}
# global var for directories; real values are filled in by initialize_globals()
project_dir = ''
msg_body = ''#project_dir + '/log/launch.log'
scm_dir = ''#os.getcwd() + '/../scm/'
conf_dir = ''#os.getcwd() + '/../conf/'
json_dir = ''
production_dir = ''
# Absolute paths of the external metrics-grimoire tools driven by this
# launcher, keyed by backend/data-source name.
tools = {
    'scm' :'/usr/local/bin/cvsanaly2',
    'its': '/usr/local/bin/bicho',
    'scr': '/usr/local/bin/bicho',
    'mls': '/usr/local/bin/mlstats',
    'irc': '/usr/local/bin/irc_analysis.py',
    'mediawiki': '/usr/local/bin/mediawiki_analysis.py',
    'sibyl': '/usr/local/bin/sibyl.py',
    'octopus': '/usr/local/bin/octopus',
    'pullpo': '/usr/local/bin/pullpo',
    'eventizer': '/usr/local/bin/eventizer',
    'r': '/usr/bin/R',
    'rremoval': '/usr/local/bin/rremoval',
    'git': '/usr/bin/git',
    'svn': '/usr/bin/svn',
    'mysqldump': '/usr/bin/mysqldump',
    'compress': '/usr/bin/7zr',
    'rm': '/bin/rm',
    'rsync': '/usr/bin/rsync',
    'sortinghat': '/usr/local/bin/sortinghat',
    'mg2sh': '/usr/local/bin/mg2sh',
    'sh2mg': '/usr/local/bin/sh2mg',
}
# Config files where lists of repositories are found.
# It is expected to find a repository per line
BICHO_TRACKERS = "bicho_trackers.conf"
BICHO_TRACKERS_BLACKLIST = "bicho_trackers_blacklist.conf"
BICHO_1_TRACKERS = "bicho_1_trackers.conf"
BICHO_1_TRACKERS_BLACKLIST = "bicho_1_trackers_blacklist.conf"
CVSANALY_REPOSITORIES = "cvsanaly_repositories.conf"
CVSANALY_REPOSITORIES_BLACKLIST = "cvsanaly_repositories_blacklist.conf"
GERRIT_PROJECTS = "gerrit_trackers.conf"
GERRIT_PROJECTS_BLACKLIST = "gerrit_trackers_blacklist.conf"
MLSTATS_MAILING_LISTS = "mlstats_mailing_lists.conf"
MLSTATS_MAILING_LISTS_BLACKLIST = "mlstats_mailing_lists_blacklist.conf"
PUPPET_RELEASES = "puppet_releases.conf"
DOCKER_PACKAGES = "docker_packages.conf"
def get_options():
    """Parse the command line and return the resulting options object.

    Prints the parser help and exits with status 1 when the mandatory
    project directory (``-d``/``--dir``) is missing.
    """
    parser = OptionParser(
        usage='Usage: %prog [options]',
        description='Update data, process it and obtain JSON files',
        version='0.1')
    parser.add_option(
        '-d', '--dir', dest='project_dir', default=None,
        help='Path with the configuration of the project')
    parser.add_option(
        '-q', '--quiet', action='store_true', dest='quiet_mode', default=False,
        help='Disable messages in standard output')
    parser.add_option(
        '-s', '--section', dest='section', default=None,
        help='Section to be executed')
    parser.add_option(
        '-t', '--data-source', dest='subtask', default=None,
        help='Sub section to be executed (only for r)')
    parser.add_option(
        '--filter', dest='filter', default=None,
        help='Filter to be used (repository, company, project, country ...)')
    parser.add_option(
        '-g', '--debug', action='store_true', dest='debug', default=False,
        help='Enable debug mode')
    parser.add_option(
        '--python', dest='python', action="store_true",
        help='Use python script for getting metrics. (obsolete)')
    opts, _ = parser.parse_args()
    if opts.project_dir is None:
        parser.print_help()
        print("Project dir is required")
        sys.exit(1)
    return opts
def initialize_globals(pdir):
    """Initialize the module-level path globals derived from the project dir."""
    global project_dir
    global msg_body
    global scm_dir
    global irc_dir
    global conf_dir
    global downs_dir
    global json_dir
    global repos_dir
    global scripts_dir
    global production_dir
    global identities_dir
    global downloads_dir
    global r_dir

    project_dir = pdir
    # Default log sink for compose_msg().
    msg_body = pdir + '/log/launch.log'
    # Per-data-source working directories.
    scm_dir = pdir + '/scm/'
    irc_dir = pdir + '/irc/'
    conf_dir = pdir + '/conf/'
    downs_dir = pdir + '/downloads/'
    json_dir = pdir + '/json/'
    repos_dir = conf_dir + "repositories/"
    scripts_dir = pdir + '/scripts/'
    production_dir = pdir + '/production/'
    # Auxiliary tools shipped inside the project tree.
    identities_dir = pdir + '/tools/VizGrimoireUtils/identities/'
    downloads_dir = pdir + '/tools/VizGrimoireUtils/downloads/'
    r_dir = pdir + '/tools/GrimoireLib/vizGrimoireJS/'
def read_main_conf():
    """Parse <project_dir>/conf/main.conf into the global `options` dict.

    Every config section becomes a key of `options`; each option value is
    stored as a string except for two special cases: 'debug' (boolean) and
    the comma-separated list options.

    :returns: the global `options` dictionary, updated in place
    """
    parser = SafeConfigParser()
    conf_file = project_dir + '/conf/main.conf'
    # `with` guarantees the descriptor is closed even if parsing raises
    # (the original leaked it on error).
    with open(conf_file, 'r') as fd:
        parser.readfp(fd)
    for sec in parser.sections():
        options[sec] = {}
        for opt in parser.options(sec):
            # first, some special cases
            if opt == 'debug':
                options[sec][opt] = parser.getboolean(sec, opt)
            elif opt in ('trackers', 'projects', 'pre_scripts', 'post_scripts'):
                data_sources = parser.get(sec, opt).split(',')
                options[sec][opt] = [ds.replace('\n', '') for ds in data_sources]
            else:
                options[sec][opt] = parser.get(sec, opt)
    return options
def repositories(file_path):
    """Return the list of repositories found in file_path.

    :param file_path: file where the repositories are found, one per line,
                      resolved relative to the global `conf_dir`
    :returns: list of repository names/URLs (one per line of the file)
    """
    path = os.path.join(conf_dir, file_path)
    # print() form works on both Python 2 and 3 (original used a
    # Py2-only print statement).
    print(path)
    # `with` ensures the descriptor is closed; the original leaked it.
    with open(path) as handle:
        return handle.read().splitlines()
# git specific: search all repos in a directory recursively
def get_scm_repos(dir=''):
    """Recursively collect SCM working copies (git or svn) under `dir`.

    :param dir: directory to scan; when empty the global `scm_dir` is used.
                (The original default `dir=scm_dir` was evaluated at
                definition time — a late-binding hazard; '' reuses the
                function's own fallback and is backward compatible.)
    :returns: list of paths containing a .git or .svn subdirectory
    """
    all_repos = []
    if dir == '':
        dir = scm_dir
    if not os.path.isdir(dir):
        return all_repos
    for entry in os.listdir(dir):
        repo_path = os.path.join(dir, entry)
        if (os.path.isdir(os.path.join(repo_path, ".git"))
                or os.path.isdir(os.path.join(repo_path, ".svn"))):
            all_repos.append(repo_path)
        # Recurse so nested checkouts are collected too.
        all_repos.extend(get_scm_repos(repo_path))
    return all_repos
def update_scm(dir=''):
    """Bring every SCM working copy known to get_scm_repos() up to date.

    git repos are fetched and hard-reset to origin/master (falling back to
    origin/trunk); svn repos get `svn update`.

    :param dir: kept for backward compatibility; repositories are always
                discovered via get_scm_repos() regardless of its value
    """
    compose_msg("SCM is being updated")
    repos = get_scm_repos()
    updated = False
    log_file = project_dir + '/log/launch_cvsanaly.log'
    for r in repos:
        os.chdir(r)
        # NOTE(review): r is already an absolute path, so
        # os.path.join(dir, r, ...) resolves to r/... whatever `dir` is.
        if os.path.isdir(os.path.join(dir, r, ".git")):
            os.system("GIT_ASKPASS=echo git fetch origin >> %s 2>&1" % (log_file))
            errcode = os.system("GIT_ASKPASS=echo git reset --hard origin/master -- >> %s 2>&1" % (log_file))
            if errcode != 0:
                # Sometimes master branch does not exist and it's replaced by trunk
                os.system("GIT_ASKPASS=echo git reset --hard origin/trunk -- >> %s 2>&1" % (log_file))
        elif os.path.isdir(os.path.join(dir, r, ".svn")):
            os.system("svn update >> %s 2>&1" % (log_file))
        else:
            compose_msg(r + " not git nor svn.", log_file)
        compose_msg(r + " update ended", log_file)
        # fix: `updated` was never set, so the summary below was dead code
        updated = True
    if updated:
        compose_msg("[OK] SCM updated")
def check_tool(cmd):
    """Return True when `cmd` is an existing, executable file."""
    # fix: the original had an unreachable `return True` after this line.
    return os.path.isfile(cmd) and os.access(cmd, os.X_OK)
def check_tools():
    """Verify that every configured external tool exists and is executable.

    Missing tools are reported to the launch log and to stdout; the run
    continues regardless (reports depending on them will be skipped).
    """
    tools_ok = True
    for name in tools:
        path = tools[name]
        if check_tool(path):
            continue
        compose_msg(path + " not found or not executable.")
        print (path + " not found or not executable.")
        tools_ok = False
    if not tools_ok:
        print ("Missing tools. Some reports could not be created.")
def launch_checkdbs():
    """Ensure every configured database exists, creating missing ones.

    Scans the [generic] section for the known db_* keys; for each database
    name found, tries to connect and, on failure, creates the database
    with the utf8 character set.
    """
    db_user = options['generic']['db_user']
    db_password = options['generic']['db_password']
    # Databases that must exist beforehand, in the original check order.
    # mlstats and sortinghat create their own databases, so they are
    # deliberately absent from this list.
    candidate_keys = [
        'db_identities',
        'db_cvsanaly',
        'db_bicho',
        'db_bicho_1',
        'db_gerrit',
        'db_irc',
        'db_mediawiki',
        'db_releases',
        # LEGACY qaforums. Use sibyl in new deployments.
        'db_qaforums',
        'db_sibyl',
        'db_downloads',
        'db_pullpo',
        'db_eventizer',
        'db_projects',
        # Octopus db
        'db_octopus',
    ]
    dbs = [options['generic'][key] for key in candidate_keys
           if key in options['generic']]
    for dbname in dbs:
        try:
            db = MySQLdb.connect(user=db_user, passwd=db_password, db=dbname)
            db.close()
        except Exception:
            # Connection failed: assume the database is missing and create it.
            print ("Can't connect to " + dbname)
            db = MySQLdb.connect(user=db_user, passwd=db_password)
            cursor = db.cursor()
            # dbname comes from the trusted project config, not user input.
            query = "CREATE DATABASE " + dbname + " CHARACTER SET utf8"
            cursor.execute(query)
            db.close()
            print (dbname + " created")
def launch_scripts(scripts):
    """Run each script from `scripts_dir`, appending its output to the log.

    :param scripts: iterable of script file names relative to scripts_dir
    """
    for script in scripts:
        command = "%s >> %s 2>&1" % (os.path.join(scripts_dir, script), msg_body)
        compose_msg("Running %s" % command)
        os.system(command)
        compose_msg("%s script completed" % script)
def launch_pre_tool_scripts(tool):
    """Run the pre_scripts configured for `tool`'s config section, if any.

    :param tool: name of a section in the global `options` dict
    """
    if tool not in options:
        return
    # has_key() is deprecated (removed in Python 3); `in` is equivalent.
    if 'pre_scripts' in options[tool]:
        compose_msg("Running %s pre scripts" % tool)
        launch_scripts(options[tool]['pre_scripts'])
        compose_msg("%s pre scripts completed" % tool)
    else:
        compose_msg("No %s pre scripts configured" % tool)
def launch_post_tool_scripts(tool):
    """Run the post_scripts configured for `tool`'s config section, if any.

    :param tool: name of a section in the global `options` dict
    """
    if tool not in options:
        return
    # has_key() is deprecated (removed in Python 3); `in` is equivalent.
    if 'post_scripts' in options[tool]:
        compose_msg("Running %s post scripts" % tool)
        launch_scripts(options[tool]['post_scripts'])
        compose_msg("%s post scripts completed" % tool)
    else:
        compose_msg("No %s post scripts configured" % tool)
def launch_cvsanaly():
    """Run cvsanaly over every repository found under `scm_dir`.

    Requires a [cvsanaly] section in the config. Working copies are
    updated first; pre/post scripts are honoured.
    """
    # using the conf executes cvsanaly for the repos inside scm dir
    if 'cvsanaly' not in options:
        compose_msg("[SKIPPED] cvsanaly not executed, no conf available")
        return
    if not check_tool(tools['scm']):
        return
    update_scm()
    compose_msg("cvsanaly is being executed")
    launched = False
    db_name = options['generic']['db_cvsanaly']
    db_user = options['generic']['db_user']
    db_pass = options['generic']['db_password']
    if db_pass == "":
        # keep the shell command well-formed with an empty password
        db_pass = "''"
    log_file = project_dir + '/log/launch_cvsanaly.log'
    # we launch cvsanaly against the repos
    repos = get_scm_repos()
    # pre-scripts
    launch_pre_tool_scripts('cvsanaly')
    for r in repos:
        launched = True
        os.chdir(r)
        if 'extensions' in options['cvsanaly']:
            cmd = tools['scm'] + " -u %s -p %s -d %s --extensions=%s >> %s 2>&1" \
                % (db_user, db_pass, db_name, options['cvsanaly']['extensions'], log_file)
        else:
            cmd = tools['scm'] + " -u %s -p %s -d %s >> %s 2>&1" \
                % (db_user, db_pass, db_name, log_file)
        compose_msg(cmd, log_file)
        os.system(cmd)
    if launched:
        compose_msg("[OK] cvsanaly executed")
        # post-scripts
        launch_post_tool_scripts('cvsanaly')
    else:
        compose_msg("[SKIPPED] cvsanaly was not executed")
def launch_bicho(section=None):
    """Run bicho for the main tracker section and the additional one.

    :param section: accepted for API symmetry with the other launchers;
                    both known bicho sections are processed regardless
    """
    for bicho_section in ('bicho', 'bicho_1'):
        do_bicho(bicho_section)
def do_bicho(section=None):
    """Run bicho once per configured issue tracker.

    Reads connection and backend settings from config section `section`
    (default 'bicho'); tracker URLs come from the config itself or from
    the external trackers file.

    :param section: config section name, expected to start with 'bicho'
    """
    # reads a conf file with all of the information and launches bicho
    if section is None:
        section = 'bicho'
    if not section.startswith("bicho"):
        logging.error("Wrong bicho section name " + section)
    if section not in options:
        compose_msg("[SKIPPED] bicho not executed, no conf available for " + section)
        return
    if not check_tool(tools['its']):
        return
    compose_msg("bicho is being executed")
    launched = False
    database = options['generic']['db_' + section]
    db_user = options['generic']['db_user']
    db_pass = options['generic']['db_password']
    delay = options[section]['delay']
    backend = options[section]['backend']
    backend_user = backend_password = None
    backend_token = None
    num_issues_query = None
    if 'backend_user' in options[section]:
        backend_user = options[section]['backend_user']
    if 'backend_password' in options[section]:
        backend_password = options[section]['backend_password']
    if 'backend_token' in options[section]:
        backend_token = options[section]['backend_token']
    if 'num-issues-query' in options[section]:
        num_issues_query = options[section]['num-issues-query']
    # Retrieving trackers from config file or from an external config file.
    # (The original read BICHO_TRACKERS first and then overwrote it for
    # bicho_1; the elif avoids the redundant file read.)
    if 'trackers' in options[section]:
        trackers = options[section]['trackers']
    elif section == "bicho_1":
        trackers = repositories(BICHO_1_TRACKERS)
    else:
        trackers = repositories(BICHO_TRACKERS)
    log_table = None
    debug = options[section]['debug']
    if 'log_table' in options[section]:
        log_table = options[section]['log_table']
    log_file = project_dir + '/log/launch_bicho.log'
    # we compose some flags
    flags = ""
    if debug:
        flags = flags + " -g"
    # we'll only create the log table in the last execution
    cont = 0
    last = len(trackers)
    # pre-scripts
    launch_pre_tool_scripts(section)
    for t in trackers:
        launched = True
        cont = cont + 1
        if cont == last and log_table:
            flags = flags + " -l"
        user_opt = ''
        # Authentication parameters: a token takes precedence over
        # user/password credentials.
        if backend_token:
            user_opt = '--backend-token=%s' % (backend_token)
        elif backend_user and backend_password:
            user_opt = '--backend-user=%s --backend-password=%s' % (backend_user, backend_password)
        if num_issues_query:
            user_opt = user_opt + ' --num-issues=%s' % (num_issues_query)
        cmd = tools['its'] + " --db-user-out=%s --db-password-out=%s --db-database-out=%s -d %s -b %s %s -u %s %s >> %s 2>&1" \
            % (db_user, db_pass, database, str(delay), backend, user_opt, t, flags, log_file)
        compose_msg(cmd, log_file)
        os.system(cmd)
    if launched:
        compose_msg("[OK] bicho executed")
        # post-scripts
        launch_post_tool_scripts(section)
    else:
        compose_msg("[SKIPPED] bicho was not executed")
def launch_gather():
    """ This tasks will execute in parallel all data gathering tasks """
    logging.info("Executing all data gathering tasks in parallel")
    from multiprocessing import Process, active_children
    gather_tasks_order = ['cvsanaly', 'bicho', 'gerrit', 'mlstats',
                          'irc', 'mediawiki', 'downloads', 'sibyl',
                          'octopus', 'pullpo', 'eventizer']
    for section in gather_tasks_order:
        logging.info("Executing %s ...." % (section))
        Process(target=tasks_section_gather[section]).start()
    # Poll until every child process has finished.
    while active_children():
        time.sleep(0.5)
def remove_gerrit_repositories(repositories, db_user, db_pass, database):
    """Remove the given projects from the bicho database via rremoval.

    WARNING: if a repository name differs from the one stored in the
    database (e.g. Gerrit project names vs Bicho's URL format), this may
    remove unintended repositories from the database.
    """
    for project in repositories:
        compose_msg("Removing %s " % (project))
        removal_args = [tools['rremoval'],
                        "-u", db_user,
                        "-p", db_pass,
                        "-d", database,
                        "-b", "bicho",
                        "-r", project]
        proc = subprocess.Popen(removal_args,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        proc.communicate()
def launch_gerrit():
    """Run bicho's gerrit backend for the configured projects.

    Project lists come either from the config section or from external
    files; when the external files are used, blacklisted and deprecated
    projects are pruned from the database first, guarded by a 5% safety
    threshold so a bad list cannot wipe the database.
    """
    # reads a conf file with all of the information and launches bicho
    if 'gerrit' not in options:
        compose_msg("[SKIPPED] bicho (gerrit) not executed, no conf available")
        return
    if not check_tool(tools['scr']):
        return
    compose_msg("bicho (gerrit) is being executed")
    launched = False
    database = options['generic']['db_gerrit']
    db_user = options['generic']['db_user']
    db_pass = options['generic']['db_password']
    delay = options['gerrit']['delay']
    backend = options['gerrit']['backend']
    trackers = options['gerrit']['trackers']
    # Retrieving projects from database
    proc = subprocess.Popen([tools['rremoval'], "-u", db_user, "-p", db_pass,
                             "-d", database, "-b", "bicho", "-l"],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    process_output = proc.communicate()
    # NOTE(review): rremoval's output is eval()ed — it must stay trusted.
    db_projects = eval(process_output[0])
    # Retrieving projects
    if 'projects' in options['gerrit']:
        projects = options['gerrit']['projects']
        projects = [str(trackers[0]) + "_" + project.replace('"', '') for project in projects]
    else:
        all_projects = repositories(repos_dir + GERRIT_PROJECTS)
        # Open repositories to be analyzed
        projects_blacklist = repositories(repos_dir + GERRIT_PROJECTS_BLACKLIST)
        projects = [project for project in all_projects if project not in projects_blacklist]
        # Using format from Bicho database to manage Gerrit URLs
        projects = [str(trackers[0]) + "_" + project for project in projects]
        projects_blacklist = [str(trackers[0]) + "_" + project for project in projects_blacklist]
        # Removing blacklist projects if they are found in the database
        projects_blacklist = [project for project in projects_blacklist if project in db_projects]
        compose_msg("Removing the following projects found in the blacklist and in the database")
        # Checking if more than a 5% of the total list is going to be removed.
        # If so, a warning message is raised and no project is removed.
        if len(projects) == 0 or float(len(projects_blacklist)) / float(len(projects)) > 0.05:
            compose_msg("WARNING: More than a 5% of the total number of projects is required to be removed. No action.")
        else:
            remove_gerrit_repositories(projects_blacklist, db_user, db_pass, database)
        # Removing those projects that are found in the database, but not in
        # the list of projects.
        to_remove_projects = [project for project in db_projects if project not in projects]
        compose_msg("Removing the following deprecated projects from the database")
        if len(projects) == 0 or float(len(to_remove_projects)) / float(len(projects)) >= 0.05:
            compose_msg("WARNING: More than a 5% of the total number of projects is required to be removed. No action.")
        else:
            remove_gerrit_repositories(to_remove_projects, db_user, db_pass, database)
    debug = options['gerrit']['debug']
    log_table = None
    if 'log_table' in options['gerrit']:
        log_table = options['gerrit']['log_table']
    log_file = project_dir + '/log/launch_gerrit.log'
    flags = ""
    if debug:
        flags = flags + " -g"
    # pre-scripts
    launch_pre_tool_scripts('gerrit')
    # we'll only create the log table in the last execution
    cont = 0
    last = len(projects)
    # Re-formating the projects name
    projects = [project.replace(str(trackers[0]) + "_", "") for project in projects]
    for project in projects:
        launched = True
        cont = cont + 1
        if cont == last and log_table:
            flags = flags + " -l"
        g_user = ''
        if 'user' in options['gerrit']:
            g_user = '--backend-user ' + options['gerrit']['user']
        cmd = tools['scr'] + " --db-user-out=%s --db-password-out=%s --db-database-out=%s -d %s -b %s %s -u %s --gerrit-project=%s %s >> %s 2>&1" \
            % (db_user, db_pass, database, str(delay), backend, g_user, trackers[0], project, flags, log_file)
        compose_msg(cmd, log_file)
        os.system(cmd)
    if launched:
        compose_msg("[OK] bicho (gerrit) executed")
        # post-scripts
        launch_post_tool_scripts('gerrit')
    else:
        compose_msg("[SKIPPED] bicho (gerrit) not executed")
def launch_mlstats():
    """Run mlstats over every configured mailing list."""
    if 'mlstats' not in options:
        compose_msg("[SKIPPED] mlstats was not executed, no conf available")
        return
    if not check_tool(tools['mls']):
        return
    compose_msg("mlstats is being executed")
    launched = False
    db_admin_user = options['generic']['db_user']
    db_user = db_admin_user
    db_pass = options['generic']['db_password']
    db_name = options['generic']['db_mlstats']
    # Retrieving mailing lists
    if 'mailing_lists' in options['mlstats']:
        mlists = options['mlstats']['mailing_lists'].split(",")
        # Strip the surrounding double quotes from each entry.
        mlists = [m[m.find('"') + 1:m.rfind('"')] for m in mlists]
    else:
        mlists = repositories(MLSTATS_MAILING_LISTS)
    force = ''
    if 'force' in options['mlstats']:
        if options['mlstats']['force'] is True:
            force = '--force'
    log_file = project_dir + '/log/launch_mlstats.log'
    # pre-scripts
    launch_pre_tool_scripts('mlstats')
    for m in mlists:
        launched = True
        cmd = tools['mls'] + " %s --no-report --db-user=\"%s\" --db-password=\"%s\" --db-name=\"%s\" --db-admin-user=\"%s\" --db-admin-password=\"%s\" \"%s\" >> %s 2>&1" \
            % (force, db_user, db_pass, db_name, db_admin_user, db_pass, m, log_file)
        compose_msg(cmd, log_file)
        os.system(cmd)
    if launched:
        compose_msg("[OK] mlstats executed")
        # post-scripts
        launch_post_tool_scripts('mlstats')
    else:
        compose_msg("[SKIPPED] mlstats not executed")
def launch_irc():
    """Run irc_analysis over the channels found under `irc_dir`.

    The 'slack' format runs once with an API token; any other format runs
    once per channel directory.
    """
    if 'irc' not in options:
        compose_msg("[SKIPPED] irc_analysis was not executed, no conf available")
        return
    if not check_tool(tools['irc']):
        return
    compose_msg("irc_analysis is being executed")
    launched = False
    db_admin_user = options['generic']['db_user']
    db_user = db_admin_user
    db_pass = options['generic']['db_password']
    db_name = options['generic']['db_irc']
    format = 'plain'
    if 'format' in options['irc']:
        format = options['irc']['format']
    channels = os.listdir(irc_dir)
    os.chdir(irc_dir)
    log_file = project_dir + '/log/launch_irc.log'
    # pre-scripts
    launch_pre_tool_scripts('irc')
    if format == 'slack':
        if 'token' in options['irc']:
            token = options['irc']['token']
            launched = True
            cmd = tools['irc'] + " --db-user=\"%s\" --db-password=\"%s\" --database=\"%s\" --token %s --format %s>> %s 2>&1" \
                % (db_user, db_pass, db_name, token, format, log_file)
            compose_msg(cmd, log_file)
            os.system(cmd)
        else:
            logging.error("Slack IRC supports need token option.")
    else:
        for channel in channels:
            # Skip plain files; only channel directories are analyzed.
            if not os.path.isdir(os.path.join(irc_dir, channel)):
                continue
            launched = True
            cmd = tools['irc'] + " --db-user=\"%s\" --db-password=\"%s\" --database=\"%s\" --dir=\"%s\" --channel=\"%s\" --format %s>> %s 2>&1" \
                % (db_user, db_pass, db_name, channel, channel, format, log_file)
            compose_msg(cmd, log_file)
            os.system(cmd)
    if launched:
        compose_msg("[OK] irc_analysis executed")
        # post-scripts
        launch_post_tool_scripts('irc')
    else:
        compose_msg("[SKIPPED] irc_analysis not executed")
def launch_mediawiki():
    """Run mediawiki_analysis once per configured site."""
    if 'mediawiki' not in options:
        compose_msg("[SKIPPED] mediawiki_analysis was not executed, no conf available")
        return
    if not check_tool(tools['mediawiki']):
        return
    compose_msg("mediawiki_analysis is being executed")
    launched = False
    db_admin_user = options['generic']['db_user']
    db_user = db_admin_user
    db_pass = options['generic']['db_password']
    db_name = options['generic']['db_mediawiki']
    sites = options['mediawiki']['sites']
    log_file = project_dir + '/log/launch_mediawiki.log'
    # pre-scripts
    launch_pre_tool_scripts('mediawiki')
    for site in sites.split(","):
        launched = True
        # ./mediawiki_analysis.py --database acs_mediawiki_rdo_2478 --db-user root --url http://openstack.redhat.com
        # fix: pass the single `site`, not the whole comma-separated
        # `sites` string, as the --url argument.
        cmd = tools['mediawiki'] + " --db-user=\"%s\" --db-password=\"%s\" --database=\"%s\" --url=\"%s\" >> %s 2>&1" \
            % (db_user, db_pass, db_name, site, log_file)
        compose_msg(cmd, log_file)
        os.system(cmd)
    if launched:
        compose_msg("[OK] mediawiki_analysis executed")
        # post-scripts
        launch_post_tool_scripts('mediawiki')
    else:
        compose_msg("[SKIPPED] mediawiki_analysis not executed")
def launch_downloads():
    """Run only the pre/post scripts for the downloads data source.

    The downloads section has no gathering tool of its own.
    """
    # check if downloads option exists. If it does, downloads are executed
    if 'downloads' in options:
        compose_msg("downloads does not execute any tool. Only pre and post scripts")
        # pre-scripts
        launch_pre_tool_scripts('downloads')
        # post-scripts
        launch_post_tool_scripts('downloads')
def launch_sibyl():
    """Run sibyl against the configured QA forum URL.

    (Replaces the legacy qaforums data source.)
    """
    # check if sibyl option exists
    if 'sibyl' not in options:
        compose_msg("[SKIPPED] sibyl was not executed, no conf available")
        return
    if not check_tool(tools['sibyl']):
        return
    if 'url' not in options['sibyl']:
        return
    compose_msg("sibyl is being executed")
    launched = False
    db_user = options['generic']['db_user']
    db_pass = options['generic']['db_password']
    # db_name = options['generic']['db_qaforums']
    db_name = options['generic']['db_sibyl']
    url = options['sibyl']['url']
    backend = options['sibyl']['backend']
    api_key = tags = ""
    if 'api_key' in options['sibyl']:
        api_key = " -k \"" + options['sibyl']['api_key'] + "\""
    if 'tags' in options['sibyl']:
        tags = " --tags \"" + options['sibyl']['tags'] + "\""
    log_file = project_dir + '/log/launch_sibyl.log'
    # pre-scripts
    launch_pre_tool_scripts('sibyl')
    cmd = tools['sibyl'] + " --db-user=\"%s\" --db-password=\"%s\" --database=\"%s\" --url=\"%s\" --type=\"%s\" %s %s >> %s 2>&1" \
        % (db_user, db_pass, db_name, url, backend, api_key, tags, log_file)
    compose_msg(cmd, log_file)
    os.system(cmd)
    # TODO: it's needed to check if the process correctly finished
    launched = True
    if launched:
        compose_msg("[OK] sibyl executed")
    else:
        compose_msg("[SKIPPED] sibyl not executed")
def pull_directory(path):
    """Fetch and hard-reset the git checkout containing `path` to origin/master.

    Output of both git commands is captured and discarded.
    """
    workdir = os.path.dirname(path)
    git_commands = (['/usr/bin/git', 'fetch', 'origin'],
                    ['/usr/bin/git', 'reset', '--hard', 'origin/master', '--'])
    for argv in git_commands:
        pr = subprocess.Popen(argv,
                              cwd=workdir,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              shell=False)
        pr.communicate()
def push_directory(path):
    """Stage, commit and push everything in the git checkout containing `path`.

    Output of each git command is captured and discarded.
    """
    workdir = os.path.dirname(path)
    git_commands = (['/usr/bin/git', 'add', './*'],
                    ['/usr/bin/git', 'commit', '-m', 'Updated by the Owl Bot'],
                    ['/usr/bin/git', 'push', 'origin', 'master'])
    for argv in git_commands:
        pr = subprocess.Popen(argv,
                              cwd=workdir,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              shell=False)
        pr.communicate()
def launch_octopus():
    """Run every Octopus backend: puppet, docker, github and gerrit."""
    for backend_launcher in (launch_octopus_puppet,
                             launch_octopus_docker,
                             launch_octopus_github,
                             launch_octopus_gerrit):
        backend_launcher()
def launch_octopus_export(cmd, backend):
    """ Exports the list of repositories to the specific config file"""
    # Adding the '--export' option, this disables the rest of the Octopus options
    cmd = cmd + ' --export '
    # Map each backend to its destination config file.
    outputs = {
        'puppet': PUPPET_RELEASES,
        'docker': DOCKER_PACKAGES,
        'github': CVSANALY_REPOSITORIES,
        'gerrit': GERRIT_PROJECTS,
    }
    if backend not in outputs:
        # fix: the original left `output` unbound for unknown backends
        # and crashed with NameError below.
        logging.error("launch_octopus_export: unknown backend " + backend)
        return
    output = outputs[backend]
    if not os.path.isdir(repos_dir):
        compose_msg("WARNING: '" + repos_dir + "' does not exist")
    if os.path.isdir(repos_dir):
        # This tries to fetch and push new data when exporting octopus info
        pull_directory(repos_dir)
    os.system(cmd + " > " + repos_dir + output)
    if os.path.isdir(repos_dir):
        # This tries to push new changes in the file
        push_directory(repos_dir)
def launch_octopus_puppet():
    """Run Octopus' puppet backend against the configured forge URL."""
    # check if octopus_puppet option exists
    if 'octopus_puppet' not in options:
        compose_msg("[SKIPPED] octopus for puppet was not executed, no conf available")
        return
    if not check_tool(tools['octopus']):
        return
    compose_msg("octopus for puppet is being executed")
    launched = False
    db_user = options['generic']['db_user']
    db_pass = options['generic']['db_password']
    db_name = options['generic']['db_releases']
    url = options['octopus_puppet']['url']
    log_file = project_dir + '/log/launch_octopus_puppet.log'
    # pre-scripts
    launch_pre_tool_scripts('octopus_puppet')
    cmd = tools['octopus'] + " -u \"%s\" -p \"%s\" -d \"%s\" puppet \"%s\">> %s 2>&1" \
        % (db_user, db_pass, db_name, url, log_file)
    # fix: the export command has only four placeholders; the original
    # passed log_file as a fifth argument, raising TypeError at runtime.
    export_cmd = tools['octopus'] + " -u \"%s\" -p \"%s\" -d \"%s\" puppet \"%s\" " \
        % (db_user, db_pass, db_name, url)
    compose_msg(cmd, log_file)
    os.system(cmd)
    # TODO: it's needed to check if the process correctly finished
    launched = True
    # Export data if required
    if 'export' in options['octopus_puppet']:
        launch_octopus_export(export_cmd, 'puppet')
    if launched:
        compose_msg("[OK] octopus for puppet executed")
        launch_post_tool_scripts('octopus_puppet')
    else:
        compose_msg("[SKIPPED] octopus for puppet not executed")
def launch_octopus_docker():
    """Run Octopus' docker backend for every configured owner/repository."""
    # check if octopus_docker option exists
    if 'octopus_docker' not in options:
        compose_msg("[SKIPPED] octopus for docker was not executed, no conf available")
        return
    if not check_tool(tools['octopus']):
        return
    compose_msg("octopus for docker is being executed")
    launched = False
    db_user = options['generic']['db_user']
    db_pass = options['generic']['db_password']
    db_name = options['generic']['db_releases']
    url = options['octopus_docker']['url']
    log_file = project_dir + '/log/launch_octopus_docker.log'
    owner = options['octopus_docker']['owner']
    owners = owner.split(",")
    # pre-scripts
    launch_pre_tool_scripts('octopus_docker')
    octopus_cmd = tools['octopus'] + " -u \"%s\" -p \"%s\" -d \"%s\" docker \"%s\" " \
        % (db_user, db_pass, db_name, url)
    export_cmd = octopus_cmd
    for owner in owners:
        owner = owner.strip()
        repositories = None
        # A global 'repositories' list is only valid with a single owner;
        # with several owners, per-owner 'repositories_<owner>' keys are used.
        if 'repositories' in options['octopus_docker'] and len(owners) == 1:
            repositories = options['octopus_docker']['repositories'].split(",")
        elif 'repositories' in options['octopus_docker']:
            msg = "Wrong main.conf. Several octopus docker owners and general repositories config."
            logging.error(msg)
            # fix: the original used a bare `raise` with no active
            # exception, which is itself a runtime error.
            raise Exception(msg)
        if len(owners) > 1:
            if 'repositories_' + owner.lower() in options['octopus_docker']:
                repositories = options['octopus_docker']['repositories_' + owner.lower()].split(",")
        if repositories:
            # Launch octopus for each docker repository configured
            for repo in repositories:
                repo = repo.strip()
                cmd = octopus_cmd + "\"%s\" \"%s\">> %s 2>&1" % (owner, repo, log_file)
                compose_msg(cmd, log_file)
                os.system(cmd)
        else:
            logging.error("No repositories configured for %s docker owner. Skipped" % owner)
    # Export data if required
    if 'export' in options['octopus_docker']:
        launch_octopus_export(export_cmd, 'docker')
    launched = True
    if launched:
        compose_msg("[OK] octopus for docker executed")
        launch_post_tool_scripts('octopus_docker')
    else:
        compose_msg("[SKIPPED] octopus for docker not executed")
def launch_octopus_github():
    """Run Octopus' github backend for every configured owner/repository."""
    # check if octopus_github option exists
    if 'octopus_github' not in options:
        compose_msg("[SKIPPED] octopus for github was not executed, no conf available")
        return
    if not check_tool(tools['octopus']):
        return
    compose_msg("octopus for github is being executed")
    launched = False
    db_user = options['generic']['db_user']
    db_pass = options['generic']['db_password']
    db_name = options['generic']['db_releases']
    log_file = project_dir + '/log/launch_octopus_github.log'
    owner = options['octopus_github']['owner']
    owners = owner.split(",")
    # Authentication: an OAuth token takes precedence over user/password.
    # (The original checked 'oauth_key' twice; the checks are merged.)
    if 'oauth_key' in options['octopus_github']:
        auth_params = "--gh-token " + options['octopus_github']['oauth_key']
    else:
        user = options['octopus_github']['user']
        password = options['octopus_github']['password']
        auth_params = "--gh-user=\"" + user + "\" --gh-password=\"" + password + "\""
    url = ""
    if 'url' in options['octopus_github']:
        # fix: the original read options['octopus']['url'] (wrong section),
        # raising KeyError whenever a URL was configured.
        url = "--gh-url " + options['octopus_github']['url']
    # Common octopus command for all options
    octopus_cmd = tools['octopus'] + " -u \"%s\" -p \"%s\" -d \"%s\" github %s %s " \
        % (db_user, db_pass, db_name, auth_params, url)
    export_cmd = octopus_cmd
    # pre-scripts
    launch_pre_tool_scripts('octopus_github')
    for owner in owners:
        owner = owner.strip()
        repositories = None
        # A global 'repositories' list is only valid with a single owner;
        # with several owners, per-owner 'repositories_<owner>' keys are used.
        if 'repositories' in options['octopus_github'] and len(owners) == 1:
            repositories = options['octopus_github']['repositories'].split(",")
        elif 'repositories' in options['octopus_github']:
            msg = "Wrong main.conf. Several octopus github owners and general repositories config."
            logging.error(msg)
            # fix: the original used a bare `raise` with no active
            # exception, which is itself a runtime error.
            raise Exception(msg)
        if len(owners) > 1:
            if 'repositories_' + owner.lower() in options['octopus_github']:
                repositories = options['octopus_github']['repositories_' + owner.lower()].split(",")
        if repositories:
            # Launch octopus for each configured repository of this owner
            for repo in repositories:
                repo = repo.strip()
                cmd = octopus_cmd + "\"%s\" \"%s\">> %s 2>&1" % (owner, repo, log_file)
                compose_msg(cmd, log_file)
                os.system(cmd)
        else:
            # Launch octopus for all the repositories
            cmd = octopus_cmd + "\"%s\" >> %s 2>&1" % (owner, log_file)
            compose_msg(cmd, log_file)
            os.system(cmd)
    # Export data if required
    if 'export' in options['octopus_github']:
        launch_octopus_export(export_cmd, 'github')
    launched = True
    if launched:
        compose_msg("[OK] octopus for github executed")
        launch_post_tool_scripts('octopus_github')
    else:
        compose_msg("[SKIPPED] octopus for github not executed")
def launch_octopus_gerrit():
    """ Octopus Gerrit backend """
    if 'octopus_gerrit' not in options:
        compose_msg("[SKIPPED] octopus for gerrit not executed")
        return
    if not check_tool(tools['octopus']):
        return
    compose_msg("octopus for gerrit is being executed")
    # Common options
    db_user = options['generic']['db_user']
    db_pass = options['generic']['db_password']
    db_name = options['generic']['db_octopus']
    log_file = project_dir + '/log/launch_octopus_gerrit.log'
    # Gerrit specific options
    gerrit_user = options['octopus_gerrit']['gerrit_user']
    gerrit_url = options['octopus_gerrit']['gerrit_url']
    octopus_cmd = tools['octopus'] + " -u \"%s\" -p \"%s\" -d \"%s\" gerrit --gerrit-user \"%s\" --gerrit-url \"%s\" " \
        % (db_user, db_pass, db_name, gerrit_user, gerrit_url)
    export_cmd = octopus_cmd
    # pre-scripts
    launch_pre_tool_scripts('octopus_gerrit')
    # Execute Octopus Gerrit backend
    compose_msg(octopus_cmd, log_file)
    os.system(octopus_cmd)
    compose_msg("[OK] octopus for gerrit executed")
    # post-scripts
    launch_post_tool_scripts('octopus_gerrit')
    # Export data if required
    if 'export' in options['octopus_gerrit']:
        launch_octopus_export(export_cmd, 'gerrit')
def check_sortinghat_db(db_user, db_pass, db_name):
    """ Check that the db exists and if not, create it """
    log_file = project_dir + '/log/launch_sortinghat_affiliations.log'
    try:
        db = MySQLdb.connect(user=db_user, passwd=db_pass, db=db_name)
        db.close()
        print ("Sortinghat " + db_name + " already exists")
    except Exception:
        # Connection failed: assume the database is missing and create it
        # through sortinghat's own `init` command. (A bare `except:` also
        # swallowed KeyboardInterrupt/SystemExit; Exception does not.)
        print ("Can't connect to " + db_name)
        print ("Creating sortinghat database ...")
        cmd = tools['sortinghat'] + " -u \"%s\" -p \"%s\" init \"%s\">> %s 2>&1" \
            % (db_user, db_pass, db_name, log_file)
        compose_msg(cmd, log_file)
        os.system(cmd)
def launch_sortinghat():
    """Synchronize identities between data-source databases and Sorting Hat.

    Workflow: ensure the sortinghat db exists, optionally restore it from a
    master file, import identities from every supported data source, run
    identifier completion and affiliations, then export the unified
    identities back to each data-source database and build domains tables.
    """
    logging.info("Sortinghat working ...")
    if not check_tool(tools['sortinghat']):
        logging.info("Sortinghat tool not available,")
        return
    if 'db_sortinghat' not in options['generic']:
        logging.info("No database for Sortinghat configured.")
        return
    project_name = options['generic']['project']
    db_user = options['generic']['db_user']
    db_pass = options['generic']['db_password']
    db_name = options['generic']['db_sortinghat']
    log_file = project_dir + '/log/launch_sortinghat.log'
    check_sortinghat_db(db_user, db_pass, db_name)
    # pre-scripts
    launch_pre_tool_scripts('sortinghat')
    # Import data from a master repo, if it's set
    success = False
    if 'master' in options['sortinghat']:
        success = restore_sortinghat_master()
    # For each data source export identities and load them in sortinghat
    report = get_report_module()
    dss = report.get_data_sources()
    dss_not_supported = ['downloads']
    # Temporal file to export and import identities from/in SH
    io_file = NamedTemporaryFile()
    io_file_name = io_file.name
    io_file.close()
    # Import data in Sorting Hat
    for ds in dss:
        if ds.get_name() in dss_not_supported:
            continue
        if ds.get_db_name() in options['generic']:
            db_ds = options['generic'][ds.get_db_name()]
        else:
            logging.error(ds.get_db_name() + " not in automator main.conf")
            continue
        # Export identities from ds
        cmd = tools['mg2sh'] + " -u \"%s\" -p \"%s\" -d \"%s\" --source \"%s:%s\" -o %s >> %s 2>&1" \
            % (db_user, db_pass, db_ds, project_name.lower(), ds.get_name(), io_file_name, log_file)
        compose_msg(cmd, log_file)
        os.system(cmd)
        # Load identities in sortinghat in incremental mode
        cmd = tools['sortinghat'] + " -u \"%s\" -p \"%s\" -d \"%s\" load --matching email-name -n %s >> %s 2>&1" \
            % (db_user, db_pass, db_name, io_file_name, log_file)
        compose_msg(cmd, log_file)
        os.system(cmd)
    os.remove(io_file_name)
    # Complete main identifier
    db_pass_id = db_pass
    if db_pass_id == '':
        db_pass_id = "''"
    identifier2sh = identities_dir + '/identifier2sh.py'
    cmd = identifier2sh + " -u %s -p %s -d \"%s\" " % (db_user, db_pass_id, db_name)
    compose_msg(cmd, log_file)
    os.system(cmd)
    # Do affiliations
    cmd = tools['sortinghat'] + " -u \"%s\" -p \"%s\" -d \"%s\" affiliate >> %s 2>&1" \
        % (db_user, db_pass, db_name, log_file)
    compose_msg(cmd, log_file)
    os.system(cmd)
    # Export data from Sorting Hat
    for ds in dss:
        if ds.get_name() in dss_not_supported:
            continue
        if ds.get_db_name() in options['generic']:
            db_ds = options['generic'][ds.get_db_name()]
        else:
            logging.error(ds.get_db_name() + " not in automator main.conf")
            continue
        # Export identities from sh to file
        cmd = tools['sortinghat'] + " -u \"%s\" -p \"%s\" -d \"%s\" export --source \"%s:%s\" --identities %s >> %s 2>&1" \
            % (db_user, db_pass, db_name, project_name.lower(), ds.get_name(), io_file_name, log_file)
        compose_msg(cmd, log_file)
        os.system(cmd)
        # Load identities in mg from file
        cmd = tools['sh2mg'] + " -u \"%s\" -p \"%s\" -d \"%s\" --source \"%s:%s\" %s >> %s 2>&1" \
            % (db_user, db_pass, db_ds, project_name.lower(), ds.get_name(), io_file_name, log_file)
        compose_msg(cmd, log_file)
        os.system(cmd)
    os.remove(io_file_name)
    # Create domains tables
    if db_pass == '':
        db_pass = "''"
    # fix: removed an unused local (`db_sortinghat`) — its value is db_name.
    cmd = "%s/domains_analysis.py -u %s -p %s -d %s --sortinghat>> %s 2>&1" \
        % (identities_dir, db_user, db_pass, db_name, log_file)
    compose_msg(cmd, log_file)
    os.system(cmd)
    if 'master' in options['sortinghat'] and success:
        upload_sortinghat_master()
    # post-scripts
    launch_post_tool_scripts('sortinghat')
    logging.info("Sortinghat done")
def restore_sortinghat_master():
    """Replace the Sorting Hat DB contents with the master file kept in git.

    Steps: pull the master repo, dump the current DB to a dated backup file,
    drop and recreate the DB, then load the master file.  If loading the
    master fails the just-made backup is restored; if that also fails an
    exception is raised.  Returns True on success, False otherwise.
    """
    db_user = options['generic']['db_user']
    db_pass = options['generic']['db_password']
    db_name = options['generic']['db_sortinghat']
    log_file = project_dir + '/log/launch_sortinghat.log'
    master_dir = project_dir + '/sortinghat/'
    sh_master = master_dir + options['sortinghat']['master']
    # Update master repository
    pull_directory(master_dir)
    # Export sh information to a file
    ts = dt.datetime.now()
    ts = str(ts.date())  # backups are named per day, e.g. sh_2014-05-15.json
    backup_file = project_dir + '/backups/sh_' + ts + '.json'
    code = export_sortinghat(db_user, db_pass, db_name, backup_file, log_file)
    if code != 0:
        logging.info("Error making a Sorting Hat backup.")
        return False
    else:
        logging.info("Sorting Hat backup dumped to %s" % (backup_file))
    # Drop database
    db = MySQLdb.connect(user=db_user, passwd=db_pass)
    cursor = db.cursor()
    query = "DROP DATABASE " + db_name
    cursor.execute(query)
    db.close()
    # Create the new database
    check_sortinghat_db(db_user, db_pass, db_name)
    # Import data from master file
    code = import_sortinghat(db_user, db_pass, db_name, sh_master, log_file)
    if code != 0:
        logging.info("Error importing Sorting Hat data from master file %s." % sh_master)
        logging.info("Restoring old data")
        # Fall back to the backup taken at the start of this function
        code = import_sortinghat(db_user, db_pass, db_name, backup_file, log_file)
        if code != 0:
            msg = "Fatal error restoring Sorting Hat backup"
            logging.info(msg)
            raise Exception(msg)
        else:
            logging.info("Backup restored.")
        logging.info("New Sorting Hat info will not updated on master file.")
        return False
    else:
        logging.info("Data from master file imported into Sorting Hat")
        return True
def upload_sortinghat_master():
    """Dump the current Sorting Hat DB to the master file and push it to git.

    Best effort: the push result is not checked beyond push_directory's own
    logging.
    """
    db_user = options['generic']['db_user']
    db_pass = options['generic']['db_password']
    db_name = options['generic']['db_sortinghat']
    log_file = project_dir + '/log/launch_sortinghat.log'
    master_dir = project_dir + '/sortinghat/'
    sh_master = master_dir + options['sortinghat']['master']
    export_sortinghat(db_user, db_pass, db_name, sh_master, log_file)
    # push_directory returns a status code; it was previously stored in an
    # unused local, which hid the fact that push failures are ignored here.
    push_directory(master_dir)
def import_sortinghat(db_user, db_pass, db_name, io_file_name, log_file):
    """Load identities from io_file_name into the Sorting Hat database.

    Logs the command to log_file and returns the shell exit status.
    """
    template = "%s -u \"%s\" -p \"%s\" -d \"%s\" load %s >> %s 2>&1"
    cmd = template % (tools['sortinghat'], db_user, db_pass, db_name,
                      io_file_name, log_file)
    compose_msg(cmd, log_file)
    return os.system(cmd)
def export_sortinghat(db_user, db_pass, db_name, io_file_name, log_file):
    """Dump the Sorting Hat identities to io_file_name.

    Logs the command to log_file and returns the shell exit status.
    """
    pieces = [tools['sortinghat'],
              '-u "%s"' % db_user,
              '-p "%s"' % db_pass,
              '-d "%s"' % db_name,
              'export --identities %s' % io_file_name,
              '>> %s 2>&1' % log_file]
    cmd = " ".join(pieces)
    compose_msg(cmd, log_file)
    return os.system(cmd)
def launch_pullpo():
    """Run pullpo (GitHub pull-request importer) for every configured owner.

    Authentication uses either an OAuth token ('oauth_key') or a
    user/password pair from the 'pullpo' section.  Repositories can be
    restricted with 'projects' (single owner only) or 'projects_<owner>'
    (several owners).  Skipped when no 'pullpo' section is configured.
    """
    # check if octopusl option exists
    if options.has_key('pullpo'):
        if not check_tool(tools['pullpo']):
            return
        compose_msg("pullpo is being executed")
        launched = False
        db_user = options['generic']['db_user']
        db_pass = options['generic']['db_password']
        db_name = options['generic']['db_pullpo']
        owner = options['pullpo']['owner']
        owners = owner.split(",")
        if options['pullpo'].has_key('oauth_key'):
            oauth_key = options['pullpo']['oauth_key']
        else:
            user = options['pullpo']['user']
            password = options['pullpo']['password']
        url = ""
        if options['pullpo'].has_key('url'):
            url = "--gh-url " + options['pullpo']['url']
        log_file = project_dir + '/log/launch_pullpo.log'
        # pre-scripts
        launch_pre_tool_scripts('pullpo')
        # Common pullpo command for all options
        if options['pullpo'].has_key('oauth_key'):
            auth_params = "--gh-token " + oauth_key
        else:
            auth_params = "--gh-user=\""+user+"\" --gh-password=\""+password+"\""
        pullpo_cmd = tools['pullpo'] + " -u \"%s\" -p \"%s\" -d \"%s\" %s %s " \
            %(db_user, db_pass, db_name, auth_params , url)
        for owner in owners:
            projects = None
            if options['pullpo'].has_key('projects') and len(owners) == 1:
                projects = options['pullpo']['projects']
            elif options['pullpo'].has_key('projects'):
                logging.error("Wrong main.conf. Several pullpo owners and general projects config.")
                # A bare `raise` here had no active exception to re-raise and
                # itself blew up (TypeError in Python 2); raise explicitly.
                raise Exception("Wrong main.conf. Several pullpo owners and general projects config.")
            if len(owners) > 1:
                if options['pullpo'].has_key('projects_' + owner.lower()):
                    projects = options['pullpo']['projects_' + owner.lower()].split(",")
            if projects:
                # Launch pullpo for each project configured
                for project in projects:
                    cmd = pullpo_cmd + "\"%s\" \"%s\">> %s 2>&1" % (owner, project, log_file)
                    compose_msg(cmd, log_file)
                    os.system(cmd)
            else:
                # Launch pullpo for all the repositories
                cmd = pullpo_cmd + "\"%s\" >> %s 2>&1" % (owner, log_file)
                compose_msg(cmd, log_file)
                os.system(cmd)
            launched = True
        if launched:
            compose_msg("[OK] pullpo executed")
        else:
            compose_msg("[SKIPPED] pullpo not executed")
    else:
        compose_msg("[SKIPPED] pullpo was not executed, no conf available")
def launch_eventizer():
    """Run eventizer (Meetup events importer) for every configured group.

    Requires the 'eventizer' section in main.conf with a Meetup API 'key'
    and a comma-separated 'groups' list; results go to the eventizer DB.
    """
    # check if eventizer option exists
    if options.has_key('eventizer'):
        if not check_tool(tools['eventizer']):
            return
        compose_msg("eventizer is being executed")
        launched = False
        db_user = options['generic']['db_user']
        db_pass = options['generic']['db_password']
        db_name = options['generic']['db_eventizer']
        if 'key' not in options['eventizer']:
            msg = "Metup API key not provided. Use 'key' parameter to set one."
            logging.error('[eventizer] ' + msg)
            compose_msg("[SKIPPED] eventizer not executed. %s" % msg)
            return
        if 'groups' not in options['eventizer']:
            msg = "Groups list not provided. Use 'groups' parameter to set one."
            logging.error('[eventizer] ' + msg)
            compose_msg("[SKIPPED] eventizer not executed. %s" % msg)
            return
        eventizer_key = options['eventizer']['key']
        groups = options['eventizer']['groups']
        groups = groups.split(",")
        log_file = project_dir + '/log/launch_eventizer.log'
        # pre-scripts
        launch_pre_tool_scripts('eventizer')
        # Common eventizer command for all options
        auth_params = "--key " + eventizer_key
        eventizer_cmd = tools['eventizer'] + " -u \"%s\" -p \"%s\" -d \"%s\" %s " \
            %(db_user, db_pass, db_name, auth_params)
        for group in groups:
            # Launch eventizer for each group
            group_name = group.strip()
            cmd = eventizer_cmd + "\"%s\" >> %s 2>&1" % (group_name, log_file)
            compose_msg(cmd, log_file)
            os.system(cmd)
            launched = True
        if launched:
            compose_msg("[OK] eventizer executed")
            # post-scripts
            launch_post_tool_scripts('eventizer')
        else:
            compose_msg("[SKIPPED] eventizer not executed")
    else:
        compose_msg("[SKIPPED] eventizer was not executed, no conf available")
# http://code.activestate.com/recipes/577376-simple-way-to-execute-multiple-process-in-parallel/
def exec_commands(cmds):
    """Execute shell commands in parallel, at most max_tasks at a time.

    cmds is a list of commands (consumed destructively with pop()); each is
    handed to subprocess.Popen with shell=True.  Blocks until every command
    has finished.  If any command exits with a non-zero status the whole
    program is aborted via sys.exit(1).
    Based on http://code.activestate.com/recipes/577376
    """
    if not cmds: return # empty list
    def done(p):
        return p.poll() is not None
    def success(p):
        return p.returncode == 0
    def fail():
        logging.error("Problems in report_tool.py execution. See logs.")
        sys.exit(1)
    # max_task = cpu_count()
    max_tasks = 2
    processes = []
    while True:
        while cmds and len(processes) < max_tasks:
            task = cmds.pop()
            # print subprocess.list2cmdline(task)
            processes.append(subprocess.Popen(task, shell = True))
        # Iterate over a copy: removing from the list while iterating it
        # silently skipped the element following each removed one.
        for p in processes[:]:
            if done(p):
                if success(p):
                    processes.remove(p)
                else:
                    fail()
        if not processes and not cmds:
            break
        else:
            time.sleep(0.5)
def get_report_module():
    """Make GrimoireLib importable, initialize it from main.conf and return
    the report.Report class (used as a namespace of class methods)."""
    grimoirelib = os.path.join(project_dir, "tools", "GrimoireLib","vizgrimoire")
    metricslib = os.path.join(project_dir, "tools", "GrimoireLib","vizgrimoire","metrics")
    studieslib = os.path.join(project_dir, "tools", "GrimoireLib","vizgrimoire","analysis")
    alchemy = os.path.join(project_dir, "tools", "GrimoireLib")
    for dir in [grimoirelib,metricslib,studieslib,alchemy]:
        sys.path.append(dir)
    # `report` only becomes importable after the sys.path additions above
    import report
    report.Report.init(os.path.join(conf_dir,"main.conf"))
    return report.Report
def launch_events_scripts():
    """Generate events JSON with report_tool.py, one process per supported
    data source (currently only 'scm'), run in parallel."""
    # Execute metrics tool using the automator config
    # Start one report_tool per data source active
    if options.has_key('metrics') or options.has_key('r'):
        compose_msg("events being generated")
        json_dir = '../../../json'
        conf_file = project_dir + '/conf/main.conf'
        log_file = project_dir + '/log/launch-'
        metrics_tool = "report_tool.py"
        path = r_dir
        params = get_options()
        commands = [] # One report_tool per data source
        report = get_report_module()
        dss = report.get_data_sources()
        if params.subtask:
            # --subtask restricts the run to a single data source
            ds = report.get_data_source(params.subtask)
            if ds is None:
                logging.error("Data source " + params.subtask + " not found")
                return
            dss = [ds]
        ds_events_supported = ['scm']
        for ds in dss:
            if ds.get_name() not in ds_events_supported: continue
            log_file_ds = log_file + ds.get_name()+"-events.log"
            os.chdir(path)
            cmd = "./%s -c %s -o %s --data-source %s --events >> %s 2>&1" \
                % (metrics_tool, conf_file, json_dir, ds.get_name(), log_file_ds)
            commands.append([cmd])
        # run all report_tool instances in parallel
        exec_commands (commands)
        compose_msg("[OK] events generated")
    else:
        compose_msg("[SKIPPED] Events not generated, no conf available")
def launch_metrics_scripts():
    """Run report_tool.py (metrics generation) once per active data source,
    in parallel, using the automator config.  Needs the 'metrics' or 'r'
    section in main.conf."""
    # Execute metrics tool using the automator config
    # Start one report_tool per data source active
    if options.has_key('metrics') or options.has_key('r'):
        if not check_tool(tools['r']):
            return
        compose_msg("metrics tool being launched")
        r_libs = '../../r-lib'
        python_libs = '../grimoirelib_alch:../vizgrimoire:../vizgrimoire/analysis:../vizgrimoire/metrics:./'
        json_dir = '../../../json'
        metrics_dir = '../vizgrimoire/metrics'
        conf_file = project_dir + '/conf/main.conf'
        log_file = project_dir + '/log/launch-'
        metrics_tool = "report_tool.py"
        path = r_dir
        launch_pre_tool_scripts('r')
        params = get_options()
        metrics_section = ''
        if params.filter:
            metrics_section = "--filter " + params.filter
        commands = [] # One report_tool per data source
        report = get_report_module()
        dss = report.get_data_sources()
        if params.subtask:
            # --subtask restricts the run to a single data source
            report = get_report_module()
            ds = report.get_data_source(params.subtask)
            if ds is None:
                logging.error("Data source " + params.subtask + " not found")
                return
            dss = [ds]
        for ds in dss:
            # if ds.get_name() not in ['scm','its']: continue
            log_file_ds = log_file + ds.get_name()+".log"
            os.chdir(path)
            cmd = "LANG= R_LIBS=%s PYTHONPATH=%s ./%s -c %s -m %s -o %s --data-source %s %s >> %s 2>&1" \
                % (r_libs, python_libs, metrics_tool, conf_file, metrics_dir, json_dir, ds.get_name(), metrics_section, log_file_ds)
            commands.append([cmd])
        # run all report_tool instances in parallel
        exec_commands (commands)
        compose_msg("[OK] metrics tool executed")
        launch_post_tool_scripts('r')
    else:
        compose_msg("[SKIPPED] Metrics tool was not executed, no conf available")
def get_ds_identities_cmd(db, type):
    """Build the shell command that links the identities of one data-source
    database (`db`, of kind `type`) into the unique-identities database."""
    generic = options['generic']
    user = generic['db_user']
    password = generic['db_password']
    if password == "":
        # an empty password must reach the shell as ''
        password = "''"
    ids_db = generic['db_identities']
    log_path = project_dir + '/log/identities.log'
    return ("%s/datasource2identities.py -u %s -p %s --db-name-ds=%s --db-name-ids=%s --data-source=%s>> %s 2>&1"
            % (identities_dir, user, password, db, ids_db, type, log_path))
def launch_identity_scripts():
    """Run the legacy unique-identities scripts (unifypeople, domains,
    per-data-source linking, optional countries/companies mappings).

    Skipped when the deployment uses Sorting Hat (db_identities ==
    db_sortinghat) or when no 'identities' section is configured.
    """
    # using the conf executes cvsanaly for the repos inside scm dir
    if options.has_key('identities'):
        logging.info("Unique identities scripts are being executed")
        # idir = options['identities']['iscripts_path']
        idir = identities_dir
        db_user = options['generic']['db_user']
        db_pass = options['generic']['db_password']
        if (db_pass == ""): db_pass="''"
        log_file = project_dir + '/log/identities.log'
        if options['generic'].has_key('db_identities') and \
           options['generic'].has_key('db_sortinghat'):
            if options['generic']['db_identities'] == options['generic']['db_sortinghat']:
                compose_msg("Sortinghat configuration. Not executing identities.")
                return
        if options['generic'].has_key('db_identities'):
            db_identities = options['generic']['db_identities']
            cmd = "%s/unifypeople.py -u %s -p %s -d %s >> %s 2>&1" % (idir, db_user, db_pass, db_identities, log_file)
            compose_msg(cmd, log_file)
            os.system(cmd)
            cmd = "%s/domains_analysis.py -u %s -p %s -d %s >> %s 2>&1" % (idir, db_user, db_pass, db_identities, log_file)
            compose_msg(cmd, log_file)
            os.system(cmd)
            # Generate unique identities for all data sources active
            report = get_report_module()
            dss = report.get_data_sources()
            for ds in dss:
                if ds.get_db_name() == "db_cvsanaly":
                    continue # db_cvsanaly and db_identities are the same db
                if ds.get_db_name() in options['generic']:
                    db_ds = options['generic'][ds.get_db_name()]
                else:
                    logging.error(ds.get_db_name() + " not in automator main.conf")
                    continue
                cmd = get_ds_identities_cmd(db_ds, ds.get_name())
                compose_msg(cmd, log_file)
                os.system(cmd)
            # Optional mapping tables
            if options['identities'].has_key('countries'):
                cmd = "%s/load_ids_mapping.py -m countries -t true -u %s -p %s --database %s >> %s 2>&1" \
                    % (idir, db_user, db_pass, db_identities, log_file)
                compose_msg(cmd, log_file)
                os.system(cmd)
            if options['identities'].has_key('companies'):
                cmd = "%s/load_ids_mapping.py -m companies -t true -u %s -p %s --database %s >> %s 2>&1" \
                    % (idir, db_user, db_pass, db_identities, log_file)
                compose_msg(cmd, log_file)
                os.system(cmd)
        logging.info("[OK] Identity scripts executed")
    else:
        logging.info("[SKIPPED] Unify identity scripts not executed, no conf available")
def compose_msg(text, log_file = None):
    """Append a time-stamped line containing `text` to log_file, or to the
    global run log (msg_body) when log_file is None."""
    # append text to log file
    target = msg_body if log_file is None else log_file
    time_tag = '[' + time.strftime('%H:%M:%S') + ']'
    fd = open(target, 'a')
    try:
        fd.write(time_tag + ' ' + text + '\n')
    finally:
        fd.close()
def reset_log():
    """Start a fresh run log: delete msg_body, or truncate it to empty when
    it cannot be removed (e.g. it does not exist yet)."""
    # remove log file
    try:
        os.remove(msg_body)
    except OSError:
        # fall back to truncating the file in place
        with open(msg_body, 'w') as fd:
            fd.write('')
def launch_copy_json():
    """Copy the generated JSON tree to the configured destination
    (section 'copy-json')."""
    # copy JSON files to other directories
    # This option helps when having more than one automator, but all of the
    # json files should be moved to a centralized directory
    if options.has_key('copy-json'):
        compose_msg("Copying JSON files to another directory")
        destination = os.path.join(project_dir,options['copy-json']['destination_json'])
        distutils.dir_util.copy_tree(json_dir, destination)
def launch_commit_jsones():
    """Copy the generated JSON files into the production git checkout and
    publish them (pull, add, commit, push), section 'git-production'."""
    # copy JSON files and commit + push them
    if options.has_key('git-production'):
        if not check_tool(tools['git']):
            return
        compose_msg("Commiting new JSON files with git")
        destination = os.path.join(project_dir,options['git-production']['destination_json'])
        distutils.dir_util.copy_tree(json_dir, destination)
        # Same git invocation four times over, so drive it from a table.
        git_sequence = [
            ['/usr/bin/git', 'pull'],
            ['/usr/bin/git', 'add', './*'],
            ['/usr/bin/git', 'commit', '-m', 'JSON updated by the Owl Bot'],
            ['/usr/bin/git', 'push', 'origin', 'master'],
        ]
        fd = open(msg_body, 'a')
        for git_cmd in git_sequence:
            pr = subprocess.Popen(git_cmd,
                                  cwd=os.path.dirname(destination),
                                  stdout=fd,
                                  stderr=fd,
                                  shell=False)
            pr.communicate()
        fd.close()
def launch_database_dump():
    """Dump every configured database with mysqldump, 7z-compress each dump
    and remove the uncompressed .mysql file (section 'db-dump')."""
    # copy and compression of database to be rsync with customers
    if options.has_key('db-dump'):
        if not check_tool(tools['mysqldump']) or not check_tool(tools['compress']) or not check_tool(tools['rm']):
            return
        compose_msg("Dumping databases")
        dbs = []
        # databases
        # this may fail if any of the four is not found
        db_user = options['generic']['db_user']
        db_pass = options['generic']['db_password']
        # Collect (db_name, dump_basename) pairs for every configured DB
        if options['generic'].has_key('db_bicho'):
            dbs.append([options['generic']['db_bicho'], 'tickets']);
        if options['generic'].has_key('db_cvsanaly'):
            dbs.append([options['generic']['db_cvsanaly'],'source_code']);
        if options['generic'].has_key('db_mlstats'):
            dbs.append([options['generic']['db_mlstats'],'mailing_lists']);
        if options['generic'].has_key('db_gerrit'):
            dbs.append([options['generic']['db_gerrit'],'reviews']);
        if options['generic'].has_key('db_irc'):
            # slack-backed IRC archives are not dumped
            if options['irc'].has_key('format'):
                if options['irc']['format'] != 'slack':
                    dbs.append([options['generic']['db_irc'],'irc']);
            else:
                dbs.append([options['generic']['db_irc'],'irc']);
        if options['generic'].has_key('db_mediawiki'):
            dbs.append([options['generic']['db_mediawiki'],'mediawiki']);
        if options['generic'].has_key('db_releases'):
            dbs.append([options['generic']['db_releases'],'releases'])
        if options['generic'].has_key('db_qaforums'):
            dbs.append([options['generic']['db_qaforums'],'qaforums'])
        if options['generic'].has_key('db_sibyl'):
            dbs.append([options['generic']['db_sibyl'],'qaforums'])
        if options['generic'].has_key('db_downloads'):
            dbs.append([options['generic']['db_downloads'],'downloads'])
        if options['generic'].has_key('db_pullpo'):
            dbs.append([options['generic']['db_pullpo'],'pullpo'])
        if options['generic'].has_key('db_eventizer'):
            dbs.append([options['generic']['db_eventizer'],'eventizer'])
        if options['generic'].has_key('db_projects'):
            dbs.append([options['generic']['db_projects'],'projects'])
        fd = open(msg_body, 'a')
        destination = os.path.join(project_dir,options['db-dump']['destination_db_dump'])
        # it's supposed to have db_user as root user
        for db in dbs:
            dest_mysql_file = destination + db[1] + '.mysql'
            dest_7z_file = dest_mysql_file + '.7z'
            fd_dump = open(dest_mysql_file, 'w')
            # Creation of dump file
            pr = subprocess.Popen([tools['mysqldump'], '-u', db_user, '--password='+ db_pass, db[0]],
                                  stdout = fd_dump,
                                  stderr = fd,
                                  shell = False)
            (out, error) = pr.communicate()
            fd_dump.close()
            # Creation of compressed dump file
            pr = subprocess.Popen([tools['compress'], 'a', dest_7z_file, dest_mysql_file],
                                  stdout = fd,
                                  stderr = fd,
                                  shell = False)
            (out, error) = pr.communicate()
            # Remove not compressed file
            pr = subprocess.Popen([tools['rm'], dest_mysql_file],
                                  stdout = fd,
                                  stderr = fd,
                                  shell = False)
            (out, error) = pr.communicate()
        fd.close()
def launch_json_dump():
    """Compress the JSON files into an archive to be rsynced to customers
    (section 'json-dump')."""
    # copy and compression of json files to be rsync with customers
    if options.has_key('json-dump'):
        origin = os.path.join(project_dir,options['json-dump']['origin_json_dump'])
        origin = origin + "*.json"
        destination = os.path.join(project_dir, options['json-dump']['destination_json_dump'])
        fd = open(msg_body, 'a')
        try:
            pr = subprocess.Popen([tools['compress'], 'a', destination, origin],
                                  stdout = fd,
                                  stderr = fd,
                                  shell = False)
            (out, error) = pr.communicate()
        finally:
            # the descriptor was previously leaked; always close it
            fd.close()
def launch_rsync():
    """Mirror the production directory to the production server with rsync
    over ssh (section 'rsync'); the '.git' directory is excluded."""
    # copy JSON files and commit + push them
    if options.has_key('rsync'):
        if not check_tool(tools['rsync']):
            return
        compose_msg("rsync to production server")
        fd = open(msg_body, 'a')
        destination = options['rsync']['destination']
        pr = subprocess.Popen([tools['rsync'],'--rsh', 'ssh', '-zva', '--stats', '--progress', '--update' ,'--delete', '--exclude', '.git', production_dir, destination],
                              stdout=fd,
                              stderr=fd,
                              shell=False)
        (out, error) = pr.communicate()
        fd.close()
    else:
        compose_msg("[SKIPPED] rsync scripts not executed, no conf available")
def write_json_config(data, filename):
    """Serialize `data` as pretty-printed, key-sorted JSON into
    production_dir/filename.

    TODO: if the file exists, create a backup first.
    """
    target = os.path.join(production_dir, filename)
    with open(target, 'w') as jsonfile:
        jsonfile.write(json.dumps(data, indent=4, sort_keys=True))
def launch_metricsdef_config():
    """Write data/metrics.json with the metric definitions of every active
    data source, for consumption by VizGrimoireJS."""
    filedir = os.path.join(production_dir, "data")
    if not os.path.isdir(filedir):
        os.makedirs(filedir)
    filename = os.path.join(filedir, "metrics.json")
    compose_msg("Writing metrics definition in: " + filename)
    report = get_report_module()
    automator_file = project_dir + '/conf/main.conf'
    metrics_dir = os.path.join(project_dir, "tools", "GrimoireLib","vizgrimoire","metrics")
    report.init(automator_file, metrics_dir)
    dss_active = report.get_data_sources()
    all_metricsdef = {}
    for ds in dss_active:
        compose_msg("Metrics def for " + ds.get_name())
        metricsdef = ds.get_metrics_definition(ds)
        if metricsdef is not None:
            all_metricsdef[ds.get_name()] = metricsdef
    # GrimoireUtils only becomes importable after get_report_module() has
    # extended sys.path, hence the late local import
    from GrimoireUtils import createJSON
    createJSON(all_metricsdef, filename)
def launch_vizjs_config():
    """Write config.json for VizGrimoireJS: active data sources, report
    list, period and date range, plus basic project info."""
    report = get_report_module()
    config = {}
    active_ds = []
    dss = report.get_data_sources()
    for ds in dss:
        active_ds.append(ds.get_name())
    if options['generic'].has_key('markers'):
        config['markers'] = options['generic']['markers'];
    if not ('end_date' in options['r']):
        # default to today when no end date is configured
        options['r']['end_date'] = time.strftime('%Y-%m-%d')
    config['data-sources'] = active_ds
    config['reports'] = options['r']['reports'].split(",")
    config['period'] = options['r']['period']
    config['start_date'] = options['r']['start_date']
    config['end_date'] = options['r']['end_date']
    config['project_info'] = get_project_info()
    compose_msg("Writing config file for VizGrimoireJS: " + production_dir + "config.json")
    write_json_config(config, 'config.json')
# create the project-info.json file
def get_project_info():
    """Build the project-info dict written into the VizGrimoireJS config.

    URLs are derived from the automator options when available; for ITS and
    MLS only the scheme://host part of the first tracker / mailing list is
    kept.
    """
    project_info = {
        "date":"",
        "project_name" : options['generic']['project'],
        "project_url" :"",
        "scm_url":"",
        "scm_name":"",
        "scm_type":"git",
        "its_url":"",
        "its_name":"Tickets",
        "its_type":"",
        "mls_url":"",
        "mls_name":"",
        "mls_type":"",
        "scr_url":"",
        "scr_name":"",
        "scr_type":"",
        "irc_url":"",
        "irc_name":"",
        "irc_type":"",
        "mediawiki_url":"",
        "mediawiki_name":"",
        "mediawiki_type":"",
        "sibyl_url":"",
        "sibyl_name":"",
        "sibyl_type":"",
        "producer":"Automator",
        "blog_url":""
    }
    # ITS URL
    if options.has_key('bicho'):
        its_url = options['bicho']['trackers'][0]
        aux = its_url.split("//",1)
        if (len(aux) > 1):
            # keep only scheme://host; guard added so a tracker URL without
            # "//" no longer raises IndexError (same guard as the MLS branch)
            its_url = aux[0]+"//"+aux[1].split("/")[0]
        project_info['its_url'] = its_url
    # SCM URL: not possible until automator download gits
    scm_url = ""
    # MLS URL
    if options.has_key('mlstats'):
        aux = options['mlstats']['mailing_lists']
        mls_url = aux.split(",")[0]
        aux = mls_url.split("//",1)
        if (len(aux) > 1):
            mls_url = aux[0]+"//"+aux[1].split("/")[0]
        project_info['mls_url'] = mls_url
        project_info['mls_name'] = "Mailing lists"
    # SCR URL
    if options.has_key('gerrit'):
        scr_url = "http://"+options['gerrit']['trackers'][0]
        project_info['scr_url'] = scr_url
    # Mediawiki URL
    if options.has_key('mediawiki'):
        mediawiki_url = options['mediawiki']['sites']
        project_info['mediawiki_url'] = mediawiki_url
    return project_info
def print_std(string, new_line=True):
    """Print `string` to standard output unless quiet mode is enabled.

    With new_line=False the Python 2 trailing-comma print form is used,
    which suppresses the newline.
    """
    # Send string to standard output if quiet mode is disabled
    if not opt.quiet_mode:
        if new_line:
            print(string)
        else:
            print(string),
# All tasks related to data gathering
tasks_section_gather = {
    'cvsanaly':launch_cvsanaly,
    'bicho':launch_bicho,
    'downloads': launch_downloads,
    'gerrit':launch_gerrit,
    'irc': launch_irc,
    'mediawiki': launch_mediawiki,
    'mlstats':launch_mlstats,
    'sibyl': launch_sibyl,
    'octopus': launch_octopus,
    'pullpo': launch_pullpo,
    'eventizer': launch_eventizer
    }
# Map of every runnable section name to its launcher function.
# NOTE: the dict(... .items() + ... .items()) merge is Python 2 only
# (dict views cannot be concatenated with + in Python 3).
tasks_section = dict({
    'check-dbs':launch_checkdbs,
    'copy-json': launch_copy_json,
    'db-dump':launch_database_dump,
    'gather':launch_gather,
    'git-production':launch_commit_jsones,
    'identities': launch_identity_scripts,
    'sortinghat': launch_sortinghat,
    'json-dump':launch_json_dump,
    'events':launch_events_scripts,
    'metrics':launch_metrics_scripts,
    'metricsdef':launch_metricsdef_config,
    'r':launch_metrics_scripts, # compatibility support
    'rsync':launch_rsync,
    'vizjs':launch_vizjs_config
    }.items() + tasks_section_gather.items())
# vizjs: config.json deactivate until more testing in VizJS-lib
# metricsdef: metrics.json deactivated until more testing in VizJS-lib
# Use this for serial execution of data gathering
tasks_order_serial = ['check-dbs','cvsanaly','bicho','gerrit','mlstats','irc','mediawiki', 'downloads',
                      'sibyl','octopus','pullpo','eventizer','sortinghat','events','metrics','copy-json',
                      'git-production','db-dump','json-dump','rsync']
# Use this for parallel execution of data gathering
tasks_order_parallel = ['check-dbs','gather','sortinghat','events','metrics','copy-json',
                        'git-production','db-dump','json-dump','rsync']
tasks_order = tasks_order_parallel
if __name__ == '__main__':
    # Entry point: run one section (--section) or the whole pipeline, with a
    # pid file guarding against concurrent runs, and mail the run log at the
    # end.
    logging.basicConfig(level=logging.INFO,format='%(asctime)s %(message)s')
    opt = get_options()
    initialize_globals(opt.project_dir)
    pid = str(os.getpid())
    pidfile = os.path.join(opt.project_dir, "launch.pid")
    if os.path.isfile(pidfile):
        print_std("%s already exists, launch process seems to be running. Exiting .." % pidfile)
        sys.exit()
    else:
        # `file(...)` (Python 2 builtin) relied on refcounting to close the
        # descriptor; use a context manager instead (works on py2.6+ too)
        with open(pidfile, 'w') as pidfd:
            pidfd.write(pid)
    reset_log()
    compose_msg("Starting ..")
    read_main_conf()
    check_tools()
    if opt.section is not None:
        tasks_section[opt.section]()
    else:
        for section in tasks_order:
            t0 = dt.datetime.now()
            print_std("Executing %s ...." % (section), new_line=False)
            sys.stdout.flush()
            tasks_section[section]()
            t1 = dt.datetime.now()
            print_std(" %s minutes" % ((t1-t0).seconds/60))
    print_std("Finished.")
    compose_msg("Process finished correctly ...")
    # done, we sent the result
    project = options['generic']['project']
    mail = options['generic']['mail']
    os.system("mail -s \"[%s] data updated\" %s < %s" % (project, mail, msg_body))
    os.unlink(pidfile)
| jgbarah/Automator | launch.py | Python | gpl-3.0 | 76,615 | [
"Octopus"
] | 6e1c28c8bd1452863ac97cab01dde4fb7439106ae9184e6c26f997fcd6539d42 |
from WebAppDIRAC.Lib.WebHandler import WebHandler, asyncGen
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC import gConfig, gLogger
from DIRAC.Core.Utilities import Time
import json
class PilotSummaryHandler(WebHandler):
    """Web handler for the Pilot Summary page: serves one page of the
    summary table and the values for the selection widgets."""

    # authorization properties required to access this handler
    AUTH_PROPS = "authenticated"

    @asyncGen
    def web_getPilotSummaryData(self):
        """Return one page of the pilot summary table as a JSON dict with
        keys success/result/total/date (plus extra/request when present)."""
        RPC = RPCClient("WorkloadManagement/WMSAdministrator", timeout = 600 )
        callback = {}
        req = self.__request()
        result = yield self.threadTask(RPC.getPilotSummaryWeb, req, self.globalSort , self.pageNumber, self.numberOfJobs)
        if not result["OK"]:
            self.finish({"success":"false", "result":[], "total":0, "error":result["Message"]})
            return
        result = result["Value"]
        # Sanity checks on the structure returned by the service
        if "TotalRecords" not in result:
            self.finish({"success":"false", "result":[], "total":-1, "error":"Data structure is corrupted"})
            return
        if not (result["TotalRecords"] > 0):
            self.finish({"success":"false", "result":[], "total":0, "error":"There were no data matching your selection"})
            return
        if not ("ParameterNames" in result and "Records" in result):
            self.finish({"success":"false", "result":[], "total":-1, "error":"Data structure is corrupted"})
            return
        if not (len(result["ParameterNames"]) > 0):
            self.finish({"success":"false", "result":[], "total":-1, "error":"ParameterNames field is missing"})
            return
        if not (len(result["Records"]) > 0):
            self.finish({"success":"false", "result":[], "total":0, "Message":"There are no data to display"})
            return
        # Turn each record (a list of values) into a dict keyed by the
        # parameter names returned by the service
        callback = []
        jobs = result["Records"]
        head = result["ParameterNames"]
        headLength = len(head)
        for i in jobs:
            tmp = {}
            for j in range(0,headLength):
                tmp[head[j]] = i[j]
            callback.append(tmp)
        # (a duplicated `total = result["TotalRecords"]` assignment removed)
        total = result["TotalRecords"]
        timestamp = Time.dateTime().strftime("%Y-%m-%d %H:%M [UTC]")
        if "Extras" in result:
            st = self.__dict2string({})
            extra = result["Extras"]
            callback = {"success":"true", "result":callback, "total":total, "extra":extra, "request":st, "date":timestamp }
        else:
            callback = {"success":"true", "result":callback, "total":total, "date":timestamp}
        self.finish(callback)

    def __dict2string(self, req):
        """Render a {key: [values]} dict as "key: v1, v2; key2: ..." with no
        trailing separator.  Malformed input yields a partial/empty string."""
        result = ""
        try:
            for key, value in req.items():
                result = result + str(key) + ": " + ", ".join(value) + "; "
        except Exception as x:
            # This log line previously sat OUTSIDE the except block, so it
            # ran unconditionally and raised NameError (undefined `x`) on the
            # normal, no-exception path.
            gLogger.info("\033[0;31m Exception: \033[0m %s" % x)
        result = result.strip()
        result = result[:-1]  # drop the trailing ';'
        return result

    @asyncGen
    def web_getSelectionData(self):
        """Return values for the selection widgets: the site list (preferred
        sites first) and the fixed list of pilot statuses."""
        sData = self.getSessionData()
        callback = {}
        user = sData["user"]["username"]
        if user == "Anonymous":
            self.finish({"success":"false", "result":[], "total":0, "error":"Insufficient rights"})
        else:
            RPC = RPCClient("WorkloadManagement/JobMonitoring")
            result = yield self.threadTask(RPC.getSites)
            if result["OK"]:
                tier1 = gConfig.getValue("/WebApp/PreferredSites")
                if tier1:
                    try:
                        tier1 = tier1.split(", ")
                    except Exception:
                        tier1 = list()
                else:
                    tier1 = list()
                site = []
                if len(result["Value"])>0:
                    s = list(result["Value"])
                    # preferred sites first, then the remaining ones
                    for i in tier1:
                        site.append([str(i)])
                    for i in s:
                        if i not in tier1:
                            site.append([str(i)])
                else:
                    site = [["Nothing to display"]]
            else:
                site = [["Error during RPC call"]]
            callback["site"] = site
            callback['Status'] = [['Good'],['Bad'],['Idle'],['Poor'],['Fair']]
            self.finish(callback)

    ################################################################################
    def __request(self):
        """Parse the HTTP request arguments into a WMS query dict; also sets
        self.pageNumber, self.numberOfJobs and self.globalSort."""
        self.pageNumber = 0
        self.numberOfJobs = 25
        self.globalSort = [["GridSite","ASC"]]
        req = {}
        # paging
        if "limit" in self.request.arguments:
            self.numberOfJobs = int(self.request.arguments["limit"][-1])
            if "start" in self.request.arguments:
                self.pageNumber = int(self.request.arguments["start"][-1])
            else:
                self.pageNumber = 0
        else:
            self.numberOfJobs = 25
            self.pageNumber = 0
        found = False
        if 'id' in self.request.arguments:
            jobids = list(json.loads(self.request.arguments[ 'id' ][-1]))
            if len(jobids) > 0:
                req['JobID'] = jobids
                found = True
        elif 'expand' in self.request.arguments:
            expand = list(json.loads(self.request.arguments[ 'expand' ][-1]))
            if len(expand) > 0:
                # NOTE(review): these three locals look like they were meant
                # to be the self.* attributes set above; as written they have
                # no effect.  Kept as-is to preserve behaviour -- confirm.
                globalSort = [["GridSite","ASC"]]
                numberOfJobs = 500
                pageNumber = 0
                req["ExpandSite"] = expand[0]
                found = True
        if not found:
            # regular filter parameters
            if 'prod' in self.request.arguments:
                value = list(json.loads(self.request.arguments["prod"][-1]))
                if len(value) > 0:
                    req["JobGroup"] = value
            if 'site' in self.request.arguments:
                value = list(json.loads(self.request.arguments["site"][-1]))
                if len(value) > 0:
                    if len(value) == 1:
                        # a single selected site is shown expanded
                        req["ExpandSite"] = value[0]
                    else:
                        req["GridSite"] = value
            if 'Status' in self.request.arguments:
                value = list(json.loads(self.request.arguments["Status"][-1]))
                if len(value) > 0:
                    req['Status'] = value
        # sorting
        if 'sort' in self.request.arguments:
            sort = json.loads(self.request.arguments['sort'][-1])
            if len(sort) > 0:
                self.globalSort = []
                for i in sort :
                    self.globalSort += [[i['property'],i['direction']]]
        else:
            self.globalSort = [["GridSite","DESC"]]
        # date range filters
        if 'startDate' in self.request.arguments and len(self.request.arguments["startDate"][0]) > 0:
            if 'startTime' in self.request.arguments and len(self.request.arguments["startTime"][0]) > 0:
                req["FromDate"] = str(self.request.arguments["startDate"][0] + " " + self.request.arguments["startTime"][0])
            else:
                req["FromDate"] = str(self.request.arguments["startDate"][0])
        if 'endDate' in self.request.arguments and len(self.request.arguments["endDate"][0]) > 0:
            if 'endTime' in self.request.arguments and len(self.request.arguments["endTime"][0]) > 0:
                req["ToDate"] = str(self.request.arguments["endDate"][0] + " " + self.request.arguments["endTime"][0])
            else:
                req["ToDate"] = str(self.request.arguments["endDate"][0])
        if 'date' in self.request.arguments and len(self.request.arguments["date"][0]) > 0:
            req["LastUpdate"] = str(self.request.arguments["date"][0])
        gLogger.info("REQUEST:",req)
        return req
| zmathe/WebAppDIRAC | WebApp/handler/PilotSummaryHandler.py | Python | gpl-3.0 | 6,823 | [
"DIRAC"
] | abf9c5566d14a82c9340a3b4e2a928a21c5f88149095c0cea8993dcbb55d46ba |
# modified mexican hat wavelet test.py
# spectral analysis for RADAR and WRF patterns
# NO plotting - just saving the results: LOG-response spectra for each sigma and max-LOG response numerical spectra
# pre-convolved with a gaussian filter of sigma=10
#
# NOTE(review): this is a Python 2 script (print statements, text-mode pickle).
# The indentation below was reconstructed from context; confirm the loop
# nesting against the original script before relying on it.
import os, shutil
import time, datetime
import pickle
import numpy as np
from scipy import signal, ndimage
import matplotlib.pyplot as plt
from armor import defaultParameters as dp
from armor import pattern
from armor import objects4 as ob
#from armor import misc as ms

# Shorthand for the project's DBZ pattern class and the prepared data streams.
dbz = pattern.DBZ
kongreywrf = ob.kongreywrf
kongreywrf.fix()
kongrey = ob.kongrey
monsoon = ob.monsoon
monsoon.list= [v for v in monsoon.list if '20120612' in v.dataTime] #fix
march2014 = ob.march2014
march2014wrf11 = ob.march2014wrf11
march2014wrf12 = ob.march2014wrf12
march2014wrf = ob.march2014wrf
march2014wrf.fix()
################################################################################
# hack
#kongrey.list = [v for v in kongrey.list if v.dataTime>="20130828.2320"]
################################################################################
# parameters
sigmaPreprocessing = 16  # sigma for preprocessing, 2014-05-15
testName = "modifiedMexicanHatTest15_march2014_sigmaPreprocessing" + str(sigmaPreprocessing)
# Scales (sigmas, in pixels) at which the Laplacian-of-Gaussian is sampled.
sigmas = [1, 2, 4, 5, 8 ,10 ,16, 20, 32, 40, 64, 80, 128, 160, 256,]
dbzstreams = [march2014]
sigmaPower=0
scaleSpacePower=0  #2014-05-14
testScriptsFolder = dp.root + 'python/armor/tests/'
timeString = str(int(time.time()))
outputFolder = dp.root + 'labLogs/%d-%d-%d-%s/' % \
    (time.localtime().tm_year, time.localtime().tm_mon, time.localtime().tm_mday, testName)
if not os.path.exists(outputFolder):
    os.makedirs(outputFolder)
# Archive a copy of this very script next to its outputs, for provenance.
shutil.copyfile(testScriptsFolder+testName+".py", outputFolder+ timeString + testName+".py")
# end parameters
################################################################################
summaryFile = open(outputFolder + timeString + "summary.txt", 'a')
for ds in dbzstreams:
    summaryFile.write("\n===============================================================\n\n\n")
    # Running mean of the |LoG| spectrum over all patterns seen so far.
    streamMean = 0.
    dbzCount = 0
    #hack
    #streamMean = np.array([135992.57472004235, 47133.59049120619, 16685.039217734946, 11814.043851969862, 5621.567482638702, 3943.2774923729303, 1920.246102887001, 1399.7855335686243, 760.055614122099, 575.3654495432361, 322.26668666562375, 243.49842951291757, 120.54647935045809, 79.05741086463254, 26.38971066782135])
    #dbzCount = 140
    for a in ds:
        print "-------------------------------------------------"
        print testName
        print
        print a.name
        a.load()
        a.setThreshold(0)
        a.saveImage(imagePath=outputFolder+a.name+".png")
        L = []
        a.responseImages = []  #2014-05-02
        #for sigma in [1, 2, 4, 8 ,16, 32, 64, 128, 256, 512]:
        for sigma in sigmas:
            print "sigma:", sigma
            # Reload to start from the raw matrix each time (gaussian_filter
            # below would otherwise compound across sigmas).
            a.load()
            a.setThreshold(0)
            arr0 = a.matrix
            #####################################################################
            arr0 = ndimage.filters.gaussian_filter(arr0, sigma=sigmaPreprocessing)  # <-- 2014-05-15
            #####################################################################
            #arr1 = signal.convolve2d(arr0, mask_i, mode='same', boundary='fill')
            #arr1 = ndimage.filters.gaussian_laplace(arr0, sigma=sigma, mode="constant", cval=0.0) #2014-05-07
            #arr1 = ndimage.filters.gaussian_laplace(arr0, sigma=sigma, mode="constant", cval=0.0) * sigma**2 #2014-04-29
            arr1 = ndimage.filters.gaussian_laplace(arr0, sigma=sigma, mode="constant", cval=0.0) * sigma**scaleSpacePower #2014-05-14
            a1 = dbz(matrix=arr1.real, name=a.name + "_" + testName + "_sigma" + str(sigma))
            L.append({'sigma'  : sigma,
                      'a1'     : a1,
                      'abssum1': abs(a1.matrix).sum(),
                      'sum1'   : a1.matrix.sum(),
                      })
            print "abs sum", abs(a1.matrix.sum())
            #a1.show()
            #a2.show()
            plt.close()
            #a1.histogram(display=False, outputPath=outputFolder+a1.name+"_histogram.png")
            ###############################################################################
            # computing the spectrum, i.e. sigma for which the LOG has max response
            # 2014-05-02
            # Scale-normalised response (sigma**2 factor) kept per sigma.
            a.responseImages.append({'sigma'  : sigma,
                                     'matrix' : arr1 * sigma**2,
                                     })
        pickle.dump(a.responseImages, open(outputFolder+a.name+"responseImagesList.pydump",'w'))
        a_LOGspec = dbz(name= a.name + "Laplacian-of-Gaussian_numerical_spectrum",
                        imagePath=outputFolder+a1.name+"_LOGspec.png",
                        outputPath = outputFolder+a1.name+"_LOGspec.dat",
                        cmap = 'jet',
                        )
        # Stack per-sigma responses along a third axis; argmax over that axis
        # (expressed via equality with the max) gives the winning sigma pixelwise.
        a.responseImages = np.dstack([v['matrix'] for v in a.responseImages])
        #print 'shape:', a.responseImages.shape  #debug
        a.responseMax = a.responseImages.max(axis=2)  # the deepest dimension
        a_LOGspec.matrix = np.zeros(a.matrix.shape)
        for count, sigma in enumerate(sigmas):
            # NOTE(review): pixels where several sigmas tie for the max get the
            # SUM of those sigmas here - presumably rare; verify if it matters.
            a_LOGspec.matrix += sigma * (a.responseMax == a.responseImages[:,:,count])
        a_LOGspec.vmin = a_LOGspec.matrix.min()
        a_LOGspec.vmax = a_LOGspec.matrix.max()
        print "saving to:", a_LOGspec.imagePath
        #a_LOGspec.saveImage()
        print a_LOGspec.outputPath
        #a_LOGspec.saveMatrix()
        #a_LOGspec.histogram(display=False, outputPath=outputFolder+a1.name+"_LOGspec_histogram.png")
        pickle.dump(a_LOGspec, open(outputFolder+ a_LOGspec.name + ".pydump","w"))
        # end computing the sigma for which the LOG has max response
        # 2014-05-02
        ##############################################################################
        #pickle.dump(L, open(outputFolder+ a.name +'_test_results.pydump','w'))  # no need to dump if test is easy
        # NOTE(review): this rebinds the module-level ``sigmas`` list to an
        # ndarray of the same values; subsequent iterations still work because
        # the values are unchanged, but the shadowing is fragile.
        sigmas = np.array([v['sigma'] for v in L])
        y1 = [v['abssum1'] for v in L]
        plt.close()
        plt.plot(sigmas,y1)
        plt.title(a1.name+ '\n absolute values against sigma')
        plt.savefig(outputFolder+a1.name+"-spectrum-histogram.png")
        plt.close()
        # now update the mean (incremental/running mean over patterns)
        streamMeanUpdate = np.array([v['abssum1'] for v in L])
        dbzCount += 1
        streamMean = 1.* ((streamMean*(dbzCount -1)) + streamMeanUpdate ) / dbzCount
        print "Stream Count and Mean so far:", dbzCount, streamMean
        # now save the mean and the plot (re-written after every pattern so a
        # partial run still leaves usable output)
        summaryText = '\n---------------------------------------\n'
        summaryText += str(int(time.time())) + '\n'
        summaryText += "dbzStream Name: " + ds.name + '\n'
        summaryText += "dbzCount:\t" + str(dbzCount) + '\n'
        summaryText +="sigma=\t\t" + str(sigmas.tolist()) + '\n'
        summaryText += "streamMean=\t" + str(streamMean.tolist()) +'\n'
        print summaryText
        print "saving..."
        # release the memory
        a.matrix = np.array([0])
        summaryFile.write(summaryText)
        plt.close()
        plt.plot(sigmas, streamMean* (sigmas**sigmaPower))
        plt.title(ds.name + '- average laplacian-of-gaussian numerical spectrum\n' +\
                  'for ' +str(dbzCount) + ' DBZ patterns\n' +\
                  'suppressed by a factor of sigma^' + str(sigmaPower) )
        plt.savefig(outputFolder + ds.name + "_average_LoG_numerical_spectrum.png")
        plt.close()
summaryFile.close()
| yaukwankiu/armor | tests/modifiedMexicanHatTest15_march2014_sigmaPreprocessing16.py | Python | cc0-1.0 | 7,833 | [
"Gaussian"
] | 61387c1f970416612ba2ba83952a655bde55bd4188579e98323c62d2e40c0f02 |
# A simple CLI runner for slurm that can be used when running Galaxy from a
# non-submit host and using a Slurm cluster.
try:
    from galaxy.model import Job
    job_states = Job.states
except ImportError:
    # Not in Galaxy, map Galaxy job states to LWR ones.
    # NOTE(review): this fallback still imports from ``galaxy.util`` - verify
    # that module is available outside a full Galaxy install.
    from galaxy.util import enum
    job_states = enum(RUNNING='running', OK='complete', QUEUED='queued')

from ..job import BaseJobExec

__all__ = ('Slurm',)

from logging import getLogger
log = getLogger(__name__)

# Maps friendly destination-parameter names to their sbatch short options.
argmap = {
    'time': '-t',
    'ncpus': '-c',
    'partition': '-p'
}
class Slurm(BaseJobExec):
    """Build and interpret Slurm CLI commands (sbatch/scancel/squeue)
    for remote job management."""

    def __init__(self, **params):
        # Keep a private copy of the destination parameters.
        self.params = {}
        for k, v in params.items():
            self.params[k] = v

    def job_script_kwargs(self, ofile, efile, job_name):
        """Return template kwargs whose ``headers`` entry holds the
        ``#SBATCH`` lines for stdout/stderr files, job name, and any
        recognized extra parameters."""
        scriptargs = {'-o': ofile,
                      '-e': efile,
                      '-J': job_name}
        # Map arguments using argmap.
        for k, v in self.params.items():
            if k == 'plugin':
                continue
            try:
                if not k.startswith('-'):
                    k = argmap[k]
                scriptargs[k] = v
            except KeyError:
                # BUG FIX: was a bare ``except:`` - only the argmap lookup
                # should be treated as "unrecognized argument".
                log.warning('Unrecognized long argument passed to Slurm CLI plugin: %s' % k)

        # Generated template.
        template_scriptargs = ''
        for k, v in scriptargs.items():
            template_scriptargs += '#SBATCH %s %s\n' % (k, v)
        return dict(headers=template_scriptargs)

    def submit(self, script_file):
        """Command line used to submit *script_file*."""
        return 'sbatch %s' % script_file

    def delete(self, job_id):
        """Command line used to cancel *job_id*."""
        return 'scancel %s' % job_id

    def get_status(self, job_ids=None):
        """Command line listing all jobs as ``"<id> <state>"`` pairs."""
        return 'squeue -a -o \\"%A %t\\"'

    def get_single_status(self, job_id):
        """Command line querying the state of a single job."""
        return 'squeue -a -o \\"%A %t\\" -j ' + job_id

    def parse_status(self, status, job_ids):
        """Parse ``squeue`` output into ``{job_id: galaxy_state}``."""
        # Get status for each job, skipping header.
        rval = {}
        for line in status.splitlines()[1:]:
            job_id, state = line.split()  # renamed from ``id`` (shadowed builtin)
            if job_id in job_ids:
                # map job states to Galaxy job states.
                rval[job_id] = self._get_job_state(state)
        return rval

    def parse_single_status(self, status, job_id):
        """Parse single-job ``squeue`` output; a job that has left the queue
        is reported as finished (OK)."""
        status = status.splitlines()
        if len(status) > 1:
            # Job still on cluster and has state.
            job_id, state = status[1].split()
            return self._get_job_state(state)
        return job_states.OK

    def _get_job_state(self, state):
        """Map a Slurm state code to a Galaxy job state, raising KeyError
        with a descriptive message for unknown codes."""
        try:
            # BUG FIX: the original used ``.get(state)``, which returns None
            # for unknown codes, so the KeyError handler below was
            # unreachable and unknown states were silently mapped to None.
            return {
                'F': job_states.ERROR,
                'R': job_states.RUNNING,
                'CG': job_states.RUNNING,
                'PD': job_states.QUEUED,
                'CD': job_states.OK
            }[state]
        except KeyError:
            raise KeyError("Failed to map slurm status code [%s] to job state." % state)
| jmchilton/lwr | lwr/managers/util/cli/job/slurm.py | Python | apache-2.0 | 2,840 | [
"Galaxy"
] | 9b7a9d587de78e9f9f2d3787030938d95d2e120dfcdadea5aeca8efd57dcc2cb |
#!/usr/bin/env python
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://setuptools.readthedocs.io/en/latest/easy_install.html
"""
from glob import glob
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import (
DistutilsArgError, DistutilsOptionError,
DistutilsError, DistutilsPlatformError,
)
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from distutils import log, dir_util
from distutils.command.build_scripts import first_line_re
from distutils.spawn import find_executable
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import textwrap
import warnings
import site
import struct
import contextlib
import subprocess
import shlex
import io
from sysconfig import get_config_vars, get_path
from setuptools import SetuptoolsDeprecationWarning
from setuptools.extern import six
from setuptools.extern.six.moves import configparser, map
from setuptools import Command
from setuptools.sandbox import run_setup
from setuptools.py27compat import rmtree_safe
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import (
PackageIndex, parse_requirement_arg, URL_SCHEME,
)
from setuptools.command import bdist_egg, egg_info
from setuptools.wheel import Wheel
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
import pkg_resources.py31compat
__metaclass__ = type  # Python 2: make all classes in this module new-style.

# Turn on PEP440Warnings
warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)

# Public API of this module.
__all__ = [
    'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
    'main', 'get_exe_prefixes',
]
def is_64bit():
    """Return True when running under a 64-bit Python interpreter."""
    # A C pointer ("P") occupies 8 bytes on 64-bit builds.
    pointer_size = struct.calcsize("P")
    return pointer_size == 8
def samefile(p1, p2):
    """
    Determine if two paths reference the same file.

    Augments os.path.samefile to work on Windows and
    suppresses errors if the path doesn't exist.
    """
    if hasattr(os.path, 'samefile') and os.path.exists(p1) and os.path.exists(p2):
        return os.path.samefile(p1, p2)
    # Fall back to a purely lexical comparison when either path is missing
    # or the platform lacks os.path.samefile.
    def canonical(p):
        return os.path.normpath(os.path.normcase(p))
    return canonical(p1) == canonical(p2)
# Python-2/3 compatible helpers for byte conversion and ASCII detection.
if six.PY2:
    def _to_bytes(s):
        # Python 2: str is already a byte string; pass through unchanged.
        return s

    def isascii(s):
        # True if the byte string *s* decodes cleanly as ASCII.
        try:
            six.text_type(s, 'ascii')
            return True
        except UnicodeError:
            return False
else:
    def _to_bytes(s):
        # Python 3: encode text to UTF-8 bytes.
        return s.encode('utf8')

    def isascii(s):
        # True if the text string *s* contains only ASCII characters.
        try:
            s.encode('ascii')
            return True
        except UnicodeError:
            return False
_one_liner = lambda text: textwrap.dedent(text).strip().replace('\n', '; ')
class easy_install(Command):
"""Manage a download/build/install process"""
    description = "Find/get/install Python packages"
    command_consumes_arguments = True

    # (long-name, short-name, help) triples consumed by distutils' option
    # parsing machinery; a trailing '=' means the option takes a value.
    user_options = [
        ('prefix=', None, "installation prefix"),
        ("zip-ok", "z", "install package as a zipfile"),
        ("multi-version", "m", "make apps have to require() a version"),
        ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
        ("install-dir=", "d", "install package to DIR"),
        ("script-dir=", "s", "install scripts to DIR"),
        ("exclude-scripts", "x", "Don't install scripts"),
        ("always-copy", "a", "Copy all needed packages to install dir"),
        ("index-url=", "i", "base URL of Python Package Index"),
        ("find-links=", "f", "additional URL(s) to search for packages"),
        ("build-directory=", "b",
         "download/extract/build in DIR; keep the results"),
        ('optimize=', 'O',
         "also compile with optimization: -O1 for \"python -O\", "
         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
        ('record=', None,
         "filename in which to record list of installed files"),
        ('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
        ('site-dirs=', 'S', "list of directories where .pth files work"),
        ('editable', 'e', "Install specified packages in editable form"),
        ('no-deps', 'N', "don't install dependencies"),
        ('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
        ('local-snapshots-ok', 'l',
         "allow building eggs from local checkouts"),
        ('version', None, "print version information and exit"),
        ('install-layout=', None, "installation layout to choose (known values: deb)"),
        ('force-installation-into-system-dir', '0', "force installation into /usr"),
        ('no-find-links', None,
         "Don't load find-links defined in packages being installed")
    ]
    # Flag options (no value); presence sets them true.
    boolean_options = [
        'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
        'editable',
        'no-deps', 'local-snapshots-ok', 'version', 'force-installation-into-system-dir'
    ]

    # Offer --user only when the interpreter supports user site-packages.
    if site.ENABLE_USER_SITE:
        help_msg = "install in user site-package '%s'" % site.USER_SITE
        user_options.append(('user', None, help_msg))
        boolean_options.append('user')

    # --always-unzip is the negation of --zip-ok.
    negative_opt = {'always-unzip': 'zip-ok'}

    # Factory used to build the package index; overridable by subclasses.
    create_index = PackageIndex
    def initialize_options(self):
        """Reset every supported option to its unset/default state."""
        # the --user option seems to be an opt-in one,
        # so the default should be False.
        self.user = 0
        self.zip_ok = self.local_snapshots_ok = None
        self.install_dir = self.script_dir = self.exclude_scripts = None
        self.index_url = None
        self.find_links = None
        self.build_directory = None
        self.args = None
        self.optimize = self.record = None
        self.upgrade = self.always_copy = self.multi_version = None
        self.editable = self.no_deps = self.allow_hosts = None
        self.root = self.prefix = self.no_report = None
        self.version = None
        self.install_purelib = None  # for pure module distributions
        self.install_platlib = None  # non-pure (dists w/ extensions)
        self.install_headers = None  # for C/C++ headers
        self.install_lib = None  # set to either purelib or platlib
        self.install_scripts = None
        self.install_data = None
        self.install_base = None
        self.install_platbase = None
        if site.ENABLE_USER_SITE:
            self.install_userbase = site.USER_BASE
            self.install_usersite = site.USER_SITE
        else:
            self.install_userbase = None
            self.install_usersite = None
        self.no_find_links = None

        # Options not specifiable via command line
        self.package_index = None
        self.pth_file = self.always_copy_from = None
        self.site_dirs = None
        self.installed_projects = {}
        self.sitepy_installed = False
        # enable custom installation, known values: deb
        self.install_layout = None
        self.force_installation_into_system_dir = None
        self.multiarch = None

        # Always read easy_install options, even if we are subclassed, or have
        # an independent instance created. This ensures that defaults will
        # always come from the standard configuration file(s)' "easy_install"
        # section, even if this is a "develop" or "install" command, or some
        # other embedding.
        self._dry_run = None
        self.verbose = self.distribution.verbose
        self.distribution._set_command_options(
            self, self.distribution.get_option_dict('easy_install')
        )
def delete_blockers(self, blockers):
extant_blockers = (
filename for filename in blockers
if os.path.exists(filename) or os.path.islink(filename)
)
list(map(self._delete_path, extant_blockers))
def _delete_path(self, path):
log.info("Deleting %s", path)
if self.dry_run:
return
is_tree = os.path.isdir(path) and not os.path.islink(path)
remover = rmtree if is_tree else os.unlink
remover(path)
@staticmethod
def _render_version():
"""
Render the Setuptools version and installation details, then exit.
"""
ver = '{}.{}'.format(*sys.version_info)
dist = get_distribution('setuptools')
tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
print(tmpl.format(**locals()))
raise SystemExit()
def finalize_options(self):
self.version and self._render_version()
py_version = sys.version.split()[0]
prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {
'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'py_version_nodot': py_version[0] + py_version[2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
# Only python 3.2+ has abiflags
'abiflags': getattr(sys, 'abiflags', ''),
}
if site.ENABLE_USER_SITE:
self.config_vars['userbase'] = self.install_userbase
self.config_vars['usersite'] = self.install_usersite
self._fix_install_dir_for_user_site()
self.expand_basedirs()
self.expand_dirs()
if self.install_layout:
if not self.install_layout.lower() in ['deb']:
raise DistutilsOptionError("unknown value for --install-layout")
self.install_layout = self.install_layout.lower()
import sysconfig
if sys.version_info[:2] >= (3, 3):
self.multiarch = sysconfig.get_config_var('MULTIARCH')
self._expand(
'install_dir', 'script_dir', 'build_directory',
'site_dirs',
)
# If a non-default installation directory was specified, default the
# script directory to match it.
if self.script_dir is None:
self.script_dir = self.install_dir
if self.no_find_links is None:
self.no_find_links = False
# Let install_dir get set by install_lib command, which in turn
# gets its info from the install command, and takes into account
# --prefix and --home and all that other crud.
self.set_undefined_options(
'install_lib', ('install_dir', 'install_dir')
)
# Likewise, set default script_dir from 'install_scripts.install_dir'
self.set_undefined_options(
'install_scripts', ('install_dir', 'script_dir')
)
if self.user and self.install_purelib:
self.install_dir = self.install_purelib
self.script_dir = self.install_scripts
if self.prefix == '/usr' and not self.force_installation_into_system_dir:
raise DistutilsOptionError("""installation into /usr
Trying to install into the system managed parts of the file system. Please
consider to install to another location, or use the option
--force-installation-into-system-dir to overwrite this warning.
""")
# default --record from the install command
self.set_undefined_options('install', ('record', 'record'))
# Should this be moved to the if statement below? It's not used
# elsewhere
normpath = map(normalize_path, sys.path)
self.all_site_dirs = get_site_dirs()
if self.site_dirs is not None:
site_dirs = [
os.path.expanduser(s.strip()) for s in
self.site_dirs.split(',')
]
for d in site_dirs:
if not os.path.isdir(d):
log.warn("%s (in --site-dirs) does not exist", d)
elif normalize_path(d) not in normpath:
raise DistutilsOptionError(
d + " (in --site-dirs) is not on sys.path"
)
else:
self.all_site_dirs.append(normalize_path(d))
if not self.editable:
self.check_site_dir()
self.index_url = self.index_url or "https://pypi.org/simple/"
self.shadow_path = self.all_site_dirs[:]
for path_item in self.install_dir, normalize_path(self.script_dir):
if path_item not in self.shadow_path:
self.shadow_path.insert(0, path_item)
if self.allow_hosts is not None:
hosts = [s.strip() for s in self.allow_hosts.split(',')]
else:
hosts = ['*']
if self.package_index is None:
self.package_index = self.create_index(
self.index_url, search_path=self.shadow_path, hosts=hosts,
)
self.local_index = Environment(self.shadow_path + sys.path)
if self.find_links is not None:
if isinstance(self.find_links, six.string_types):
self.find_links = self.find_links.split()
else:
self.find_links = []
if self.local_snapshots_ok:
self.package_index.scan_egg_links(self.shadow_path + sys.path)
if not self.no_find_links:
self.package_index.add_find_links(self.find_links)
self.set_undefined_options('install_lib', ('optimize', 'optimize'))
if not isinstance(self.optimize, int):
try:
self.optimize = int(self.optimize)
if not (0 <= self.optimize <= 2):
raise ValueError
except ValueError:
raise DistutilsOptionError("--optimize must be 0, 1, or 2")
if self.editable and not self.build_directory:
raise DistutilsArgError(
"Must specify a build directory (-b) when using --editable"
)
if not self.args:
raise DistutilsArgError(
"No urls, filenames, or requirements specified (see --help)")
self.outputs = []
def _fix_install_dir_for_user_site(self):
"""
Fix the install_dir if "--user" was used.
"""
if not self.user or not site.ENABLE_USER_SITE:
return
self.create_home_path()
if self.install_userbase is None:
msg = "User base directory is not specified"
raise DistutilsPlatformError(msg)
self.install_base = self.install_platbase = self.install_userbase
scheme_name = os.name.replace('posix', 'unix') + '_user'
self.select_scheme(scheme_name)
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
dirs = [
'install_purelib',
'install_platlib',
'install_lib',
'install_headers',
'install_scripts',
'install_data',
]
self._expand_attrs(dirs)
    def run(self, show_deprecation=True):
        """Install each requested spec, then optionally write the --record file."""
        if show_deprecation:
            self.announce(
                "WARNING: The easy_install command is deprecated "
                "and will be removed in a future version."
                , log.WARN,
            )
        if self.verbose != self.distribution.verbose:
            log.set_verbosity(self.verbose)
        try:
            for spec in self.args:
                self.easy_install(spec, not self.no_deps)
            if self.record:
                outputs = list(sorted(self.outputs))
                if self.root:  # strip any package prefix
                    root_len = len(self.root)
                    for counter in range(len(outputs)):
                        outputs[counter] = outputs[counter][root_len:]
                from distutils import file_util
                self.execute(
                    file_util.write_file, (self.record, outputs),
                    "writing list of installed files to '%s'" %
                    self.record
                )
            self.warn_deprecated_options()
        finally:
            # Restore the distribution's verbosity even if installation failed.
            log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except Exception:
pid = random.randint(0, sys.maxsize)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
    def warn_deprecated_options(self):
        # Hook for subclasses; the base implementation has nothing to warn about.
        pass
    def check_site_dir(self):
        """Verify that self.install_dir is .pth-capable dir, if needed"""
        instdir = normalize_path(self.install_dir)
        pth_file = os.path.join(instdir, 'easy-install.pth')

        # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
        is_site_dir = instdir in self.all_site_dirs

        if not is_site_dir and not self.multi_version:
            # No? Then directly test whether it does .pth file processing
            is_site_dir = self.check_pth_processing()
        else:
            # make sure we can write to target dir
            testfile = self.pseudo_tempname() + '.write-test'
            test_exists = os.path.exists(testfile)
            try:
                if test_exists:
                    os.unlink(testfile)
                open(testfile, 'w').close()
                os.unlink(testfile)
            except (OSError, IOError):
                self.cant_write_to_target()

        if not is_site_dir and not self.multi_version:
            # Can't install non-multi to non-site dir
            raise DistutilsError(self.no_default_version_msg())

        if is_site_dir:
            if self.pth_file is None:
                self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
        else:
            self.pth_file = None

        if instdir not in map(normalize_path, _pythonpath()):
            # only PYTHONPATH dirs need a site.py, so pretend it's there
            self.sitepy_installed = True
        elif self.multi_version and not os.path.exists(pth_file):
            self.sitepy_installed = True  # don't need site.py in this case
            self.pth_file = None  # and don't create a .pth file
        self.install_dir = instdir
__cant_write_msg = textwrap.dedent("""
can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
""").lstrip()
__not_exists_id = textwrap.dedent("""
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
""").lstrip()
__access_msg = textwrap.dedent("""
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
https://setuptools.readthedocs.io/en/latest/easy_install.html
Please make the appropriate changes for your system and try again.
""").lstrip()
def cant_write_to_target(self):
msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
if not os.path.exists(self.install_dir):
msg += '\n' + self.__not_exists_id
else:
msg += '\n' + self.__access_msg
raise DistutilsError(msg)
    def check_pth_processing(self):
        """Empirically verify whether .pth files are supported in inst. dir"""
        instdir = self.install_dir
        log.info("Checking .pth file support in %s", instdir)
        pth_file = self.pseudo_tempname() + ".pth"
        ok_file = pth_file + '.ok'
        ok_exists = os.path.exists(ok_file)
        # The .pth payload writes a marker file; if the marker appears after
        # spawning a fresh interpreter, .pth processing works here.
        tmpl = _one_liner("""
            import os
            f = open({ok_file!r}, 'w')
            f.write('OK')
            f.close()
            """) + '\n'
        try:
            if ok_exists:
                os.unlink(ok_file)
            dirname = os.path.dirname(ok_file)
            pkg_resources.py31compat.makedirs(dirname, exist_ok=True)
            f = open(pth_file, 'w')
        except (OSError, IOError):
            self.cant_write_to_target()
        else:
            try:
                f.write(tmpl.format(**locals()))
                f.close()
                f = None
                executable = sys.executable
                if os.name == 'nt':
                    dirname, basename = os.path.split(executable)
                    alt = os.path.join(dirname, 'pythonw.exe')
                    use_alt = (
                        basename.lower() == 'python.exe' and
                        os.path.exists(alt)
                    )
                    if use_alt:
                        # use pythonw.exe to avoid opening a console window
                        executable = alt

                from distutils.spawn import spawn
                # -E ignores PYTHON* env vars so only site processing matters.
                spawn([executable, '-E', '-c', 'pass'], 0)

                if os.path.exists(ok_file):
                    log.info(
                        "TEST PASSED: %s appears to support .pth files",
                        instdir
                    )
                    return True
            finally:
                # Always clean up the probe files, pass or fail.
                if f:
                    f.close()
                if os.path.exists(ok_file):
                    os.unlink(ok_file)
                if os.path.exists(pth_file):
                    os.unlink(pth_file)
        if not self.multi_version:
            log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
        return False
def install_egg_scripts(self, dist):
"""Write all the scripts for `dist`, unless scripts are excluded"""
if not self.exclude_scripts and dist.metadata_isdir('scripts'):
for script_name in dist.metadata_listdir('scripts'):
if dist.metadata_isdir('scripts/' + script_name):
# The "script" is a directory, likely a Python 3
# __pycache__ directory, so skip it.
continue
self.install_script(
dist, script_name,
dist.get_metadata('scripts/' + script_name)
)
self.install_wrapper_scripts(dist)
def add_output(self, path):
if os.path.isdir(path):
for base, dirs, files in os.walk(path):
for filename in files:
self.outputs.append(os.path.join(base, filename))
else:
self.outputs.append(path)
def not_editable(self, spec):
if self.editable:
raise DistutilsArgError(
"Invalid argument %r: you can't use filenames or URLs "
"with --editable (except via the --find-links option)."
% (spec,)
)
def check_editable(self, spec):
if not self.editable:
return
if os.path.exists(os.path.join(self.build_directory, spec.key)):
raise DistutilsArgError(
"%r already exists in %s; can't do a checkout there" %
(spec.key, self.build_directory)
)
    @contextlib.contextmanager
    def _tmpdir(self):
        """Yield a fresh temporary directory, removing it on exit."""
        tmpdir = tempfile.mkdtemp(prefix=u"easy_install-")
        try:
            # cast to str as workaround for #709 and #710 and #712
            yield str(tmpdir)
        finally:
            os.path.exists(tmpdir) and rmtree(rmtree_safe(tmpdir))
    def easy_install(self, spec, deps=False):
        """Install *spec* (a Requirement, URL, or local path).

        Returns the installed Distribution, or None if nothing matched.
        """
        if not self.editable:
            self.install_site_py()

        with self._tmpdir() as tmpdir:
            if not isinstance(spec, Requirement):
                if URL_SCHEME(spec):
                    # It's a url, download it to tmpdir and process
                    self.not_editable(spec)
                    dl = self.package_index.download(spec, tmpdir)
                    return self.install_item(None, dl, tmpdir, deps, True)

                elif os.path.exists(spec):
                    # Existing file or directory, just process it directly
                    self.not_editable(spec)
                    return self.install_item(None, spec, tmpdir, deps, True)
                else:
                    spec = parse_requirement_arg(spec)

            self.check_editable(spec)
            dist = self.package_index.fetch_distribution(
                spec, tmpdir, self.upgrade, self.editable,
                not self.always_copy, self.local_index
            )
            if dist is None:
                msg = "Could not find suitable distribution for %r" % spec
                if self.always_copy:
                    msg += " (--always-copy skips system and development eggs)"
                raise DistutilsError(msg)
            elif dist.precedence == DEVELOP_DIST:
                # .egg-info dists don't need installing, just process deps
                self.process_distribution(spec, dist, deps, "Using")
                return dist
            else:
                return self.install_item(spec, dist.location, tmpdir, deps)
    def install_item(self, spec, download, tmpdir, deps, install_needed=False):
        """Install the file/directory at *download*, building it first if needed.

        Returns the Distribution matching *spec*, if any.
        """
        # Installation is also needed if file in tmpdir or is not an egg
        install_needed = install_needed or self.always_copy
        install_needed = install_needed or os.path.dirname(download) == tmpdir
        install_needed = install_needed or not download.endswith('.egg')
        install_needed = install_needed or (
            self.always_copy_from is not None and
            os.path.dirname(normalize_path(download)) ==
            normalize_path(self.always_copy_from)
        )

        if spec and not install_needed:
            # at this point, we know it's a local .egg, we just don't know if
            # it's already installed.
            for dist in self.local_index[spec.project_name]:
                if dist.location == download:
                    break
            else:
                install_needed = True  # it's not in the local index

        log.info("Processing %s", os.path.basename(download))

        if install_needed:
            dists = self.install_eggs(spec, download, tmpdir)
            for dist in dists:
                self.process_distribution(spec, dist, deps)
        else:
            dists = [self.egg_distribution(download)]
            self.process_distribution(spec, dists[0], deps, "Using")

        if spec is not None:
            for dist in dists:
                if dist in spec:
                    return dist
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
    def process_distribution(self, requirement, dist, deps=True, *info):
        """Register *dist* as installed and (optionally) resolve its dependencies."""
        self.update_pth(dist)
        self.package_index.add(dist)
        if dist in self.local_index[dist.key]:
            self.local_index.remove(dist)
        self.local_index.add(dist)
        self.install_egg_scripts(dist)
        self.installed_projects[dist.key] = dist
        log.info(self.installation_report(requirement, dist, *info))
        if (dist.has_metadata('dependency_links.txt') and
                not self.no_find_links):
            self.package_index.add_find_links(
                dist.get_metadata_lines('dependency_links.txt')
            )
        if not deps and not self.always_copy:
            return
        elif requirement is not None and dist.key != requirement.key:
            log.warn("Skipping dependencies for %s", dist)
            return  # XXX this is not the distribution we were looking for
        elif requirement is None or dist not in requirement:
            # if we wound up with a different version, resolve what we've got
            distreq = dist.as_requirement()
            requirement = Requirement(str(distreq))
        log.info("Processing dependencies for %s", requirement)
        try:
            # Resolution recursively installs missing deps via self.easy_install.
            distros = WorkingSet([]).resolve(
                [requirement], self.local_index, self.easy_install
            )
        except DistributionNotFound as e:
            raise DistutilsError(str(e))
        except VersionConflict as e:
            raise DistutilsError(e.report())
        if self.always_copy or self.always_copy_from:
            # Force all the relevant distros to be copied or activated
            for dist in distros:
                if dist.key not in self.installed_projects:
                    self.easy_install(dist.as_requirement())
        log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
if self.zip_ok is not None:
return not self.zip_ok
if dist.has_metadata('not-zip-safe'):
return True
if not dist.has_metadata('zip-safe'):
return True
return False
    def maybe_move(self, spec, dist_filename, setup_base):
        """Move a downloaded source tree into --build-directory, if possible.

        Returns the directory to build from (the new location, or the
        original *setup_base* if the target already exists).
        """
        dst = os.path.join(self.build_directory, spec.key)
        if os.path.exists(dst):
            msg = (
                "%r already exists in %s; build directory %s will not be kept"
            )
            log.warn(msg, spec.key, self.build_directory, setup_base)
            return setup_base
        if os.path.isdir(dist_filename):
            setup_base = dist_filename
        else:
            if os.path.dirname(dist_filename) == setup_base:
                os.unlink(dist_filename)  # get it out of the tmp dir
            contents = os.listdir(setup_base)
            if len(contents) == 1:
                dist_filename = os.path.join(setup_base, contents[0])
                if os.path.isdir(dist_filename):
                    # if the only thing there is a directory, move it instead
                    setup_base = dist_filename
        ensure_directory(dst)
        shutil.move(setup_base, dst)
        return dst
def install_wrapper_scripts(self, dist):
if self.exclude_scripts:
return
for args in ScriptWriter.best().get_args(dist):
self.write_script(*args)
    def install_script(self, dist, script_name, script_text, dev_path=None):
        """Generate a legacy script wrapper and install it"""
        spec = str(dist.as_requirement())
        is_script = is_python_script(script_text, script_name)

        if is_script:
            # NOTE: the template is substituted with ``% locals()``, so the
            # local names ``spec`` and ``dev_path`` are load-bearing here -
            # do not rename them.
            body = self._load_template(dev_path) % locals()
            script_text = ScriptWriter.get_header(script_text) + body
        self.write_script(script_name, _to_bytes(script_text), 'b')
@staticmethod
def _load_template(dev_path):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
"""
# See https://github.com/pypa/setuptools/issues/134 for info
# on script file naming and downstream issues with SVR4
name = 'script.tmpl'
if dev_path:
name = name.replace('.tmpl', ' (dev).tmpl')
raw_bytes = resource_string('setuptools', name)
return raw_bytes.decode('utf-8')
    def write_script(self, script_name, contents, mode="t", blockers=()):
        """Write an executable file to the scripts directory"""
        self.delete_blockers(  # clean up old .py/.pyw w/o a script
            [os.path.join(self.script_dir, x) for x in blockers]
        )
        log.info("Installing %s script to %s", script_name, self.script_dir)
        target = os.path.join(self.script_dir, script_name)
        self.add_output(target)

        if self.dry_run:
            return

        mask = current_umask()
        ensure_directory(target)
        if os.path.exists(target):
            os.unlink(target)
        with open(target, "w" + mode) as f:
            f.write(contents)
        # make the script executable, honoring the current umask
        chmod(target, 0o777 - mask)
    def install_eggs(self, spec, dist_filename, tmpdir):
        """Install *dist_filename*, which may be an egg, a bdist_wininst
        .exe, a wheel, a source archive, or a source directory.

        Returns a list of installed distributions (empty when --editable
        merely extracts the source for the user to work on).
        """
        # .egg dirs or files are already built, so just return them
        if dist_filename.lower().endswith('.egg'):
            return [self.install_egg(dist_filename, tmpdir)]
        elif dist_filename.lower().endswith('.exe'):
            return [self.install_exe(dist_filename, tmpdir)]
        elif dist_filename.lower().endswith('.whl'):
            return [self.install_wheel(dist_filename, tmpdir)]

        # Anything else, try to extract and build
        setup_base = tmpdir
        if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
            unpack_archive(dist_filename, tmpdir, self.unpack_progress)
        elif os.path.isdir(dist_filename):
            setup_base = os.path.abspath(dist_filename)

        if (setup_base.startswith(tmpdir)  # something we downloaded
                and self.build_directory and spec is not None):
            setup_base = self.maybe_move(spec, dist_filename, setup_base)

        # Find the setup.py file
        setup_script = os.path.join(setup_base, 'setup.py')

        if not os.path.exists(setup_script):
            # archives often unpack to a single top-level directory;
            # look one level down
            setups = glob(os.path.join(setup_base, '*', 'setup.py'))
            if not setups:
                raise DistutilsError(
                    "Couldn't find a setup script in %s" %
                    os.path.abspath(dist_filename)
                )
            if len(setups) > 1:
                raise DistutilsError(
                    "Multiple setup scripts in %s" %
                    os.path.abspath(dist_filename)
                )
            setup_script = setups[0]

        # Now run it, and return the result
        if self.editable:
            log.info(self.report_editable(spec, setup_script))
            return []
        else:
            return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path, os.path.join(egg_path,
'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path, metadata=metadata)
    def install_egg(self, egg_path, tmpdir):
        """Copy/move/extract the egg at *egg_path* into self.install_dir
        and return the resulting Distribution.

        Eggs that originated in *tmpdir* are moved rather than copied;
        zipped eggs may be extracted depending on should_unzip().
        """
        destination = os.path.join(
            self.install_dir,
            os.path.basename(egg_path),
        )
        destination = os.path.abspath(destination)
        if not self.dry_run:
            ensure_directory(destination)

        dist = self.egg_distribution(egg_path)
        if not samefile(egg_path, destination):
            # remove whatever currently occupies the destination
            if os.path.isdir(destination) and not os.path.islink(destination):
                dir_util.remove_tree(destination, dry_run=self.dry_run)
            elif os.path.exists(destination):
                self.execute(
                    os.unlink,
                    (destination,),
                    "Removing " + destination,
                )
            try:
                new_dist_is_zipped = False
                if os.path.isdir(egg_path):
                    if egg_path.startswith(tmpdir):
                        f, m = shutil.move, "Moving"
                    else:
                        f, m = shutil.copytree, "Copying"
                elif self.should_unzip(dist):
                    self.mkpath(destination)
                    f, m = self.unpack_and_compile, "Extracting"
                else:
                    new_dist_is_zipped = True
                    if egg_path.startswith(tmpdir):
                        f, m = shutil.move, "Moving"
                    else:
                        f, m = shutil.copy2, "Copying"
                self.execute(
                    f,
                    (egg_path, destination),
                    (m + " %s to %s") % (
                        os.path.basename(egg_path),
                        os.path.dirname(destination)
                    ),
                )
                # stale importer caches must be refreshed now that the
                # destination has been replaced
                update_dist_caches(
                    destination,
                    fix_zipimporter_caches=new_dist_is_zipped,
                )
            except Exception:
                update_dist_caches(destination, fix_zipimporter_caches=False)
                raise

        self.add_output(destination)
        return self.egg_distribution(destination)
def install_exe(self, dist_filename, tmpdir):
# See if it's valid, get data
cfg = extract_wininst_cfg(dist_filename)
if cfg is None:
raise DistutilsError(
"%s is not a valid distutils Windows .exe" % dist_filename
)
# Create a dummy distribution object until we build the real distro
dist = Distribution(
None,
project_name=cfg.get('metadata', 'name'),
version=cfg.get('metadata', 'version'), platform=get_platform(),
)
# Convert the .exe to an unpacked egg
egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg')
dist.location = egg_path
egg_tmp = egg_path + '.tmp'
_egg_info = os.path.join(egg_tmp, 'EGG-INFO')
pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
self.exe_to_egg(dist_filename, egg_tmp)
# Write EGG-INFO/PKG-INFO
if not os.path.exists(pkg_inf):
f = open(pkg_inf, 'w')
f.write('Metadata-Version: 1.0\n')
for k, v in cfg.items('metadata'):
if k != 'target_version':
f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
f.close()
script_dir = os.path.join(_egg_info, 'scripts')
# delete entry-point scripts to avoid duping
self.delete_blockers([
os.path.join(script_dir, args[0])
for args in ScriptWriter.get_args(dist)
])
# Build .egg file from tmpdir
bdist_egg.make_zipfile(
egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run,
)
# install the .egg
return self.install_egg(egg_path, tmpdir)
def exe_to_egg(self, dist_filename, egg_tmp):
"""Extract a bdist_wininst to the directories an egg would use"""
# Check for .pth file and set up prefix translations
prefixes = get_exe_prefixes(dist_filename)
to_compile = []
native_libs = []
top_level = {}
def process(src, dst):
s = src.lower()
for old, new in prefixes:
if s.startswith(old):
src = new + src[len(old):]
parts = src.split('/')
dst = os.path.join(egg_tmp, *parts)
dl = dst.lower()
if dl.endswith('.pyd') or dl.endswith('.dll'):
parts[-1] = bdist_egg.strip_module(parts[-1])
top_level[os.path.splitext(parts[0])[0]] = 1
native_libs.append(src)
elif dl.endswith('.py') and old != 'SCRIPTS/':
top_level[os.path.splitext(parts[0])[0]] = 1
to_compile.append(dst)
return dst
if not src.endswith('.pth'):
log.warn("WARNING: can't process %s", src)
return None
# extract, tracking .pyd/.dll->native_libs and .py -> to_compile
unpack_archive(dist_filename, egg_tmp, process)
stubs = []
for res in native_libs:
if res.lower().endswith('.pyd'): # create stubs for .pyd's
parts = res.split('/')
resource = parts[-1]
parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
pyfile = os.path.join(egg_tmp, *parts)
to_compile.append(pyfile)
stubs.append(pyfile)
bdist_egg.write_stub(resource, pyfile)
self.byte_compile(to_compile) # compile .py's
bdist_egg.write_safety_flag(
os.path.join(egg_tmp, 'EGG-INFO'),
bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
for name in 'top_level', 'native_libs':
if locals()[name]:
txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
if not os.path.exists(txt):
f = open(txt, 'w')
f.write('\n'.join(locals()[name]) + '\n')
f.close()
    def install_wheel(self, wheel_path, tmpdir):
        """Install a .whl file by converting it to an unpacked egg in
        the install directory and return the resulting Distribution."""
        wheel = Wheel(wheel_path)
        assert wheel.is_compatible()
        destination = os.path.join(self.install_dir, wheel.egg_name())
        destination = os.path.abspath(destination)
        if not self.dry_run:
            ensure_directory(destination)
        # remove whatever currently occupies the destination
        if os.path.isdir(destination) and not os.path.islink(destination):
            dir_util.remove_tree(destination, dry_run=self.dry_run)
        elif os.path.exists(destination):
            self.execute(
                os.unlink,
                (destination,),
                "Removing " + destination,
            )
        try:
            self.execute(
                wheel.install_as_egg,
                (destination,),
                ("Installing %s to %s") % (
                    os.path.basename(wheel_path),
                    os.path.dirname(destination)
                ),
            )
        finally:
            # importer caches referring to the old contents are now stale
            update_dist_caches(destination, fix_zipimporter_caches=False)
        self.add_output(destination)
        return self.egg_distribution(destination)
__mv_warning = textwrap.dedent("""
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
""").lstrip()
__id_warning = textwrap.dedent("""
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
""")
    def installation_report(self, req, dist, what="Installed"):
        """Helpful installation message for display to package users"""
        msg = "\n%(what)s %(eggloc)s%(extras)s"
        if self.multi_version and not self.no_report:
            msg += '\n' + self.__mv_warning
            if self.install_dir not in map(normalize_path, sys.path):
                msg += '\n' + self.__id_warning

        eggloc = dist.location
        name = dist.project_name
        version = dist.version
        extras = ''  # TODO: self.report_extras(req, dist)
        # the seemingly-unused locals above (eggloc, name, version,
        # extras, what) are consumed by the %(...)s template via locals()
        return msg % locals()
__editable_msg = textwrap.dedent("""
Extracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""").lstrip()
    def report_editable(self, spec, setup_script):
        """Return the message displayed after an editable extraction."""
        dirname = os.path.dirname(setup_script)
        python = sys.executable
        # dirname/python/spec feed the %(...)s template via locals()
        return '\n' + self.__editable_msg % locals()
    def run_setup(self, setup_script, setup_base, args):
        """Run *setup_script* in a sandbox with verbosity/dry-run flags
        propagated; raise DistutilsError if the script exits non-trivially."""
        # make sure the dist's commands resolve to our patched versions
        sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
        sys.modules.setdefault('distutils.command.egg_info', egg_info)

        args = list(args)
        if self.verbose > 2:
            v = 'v' * (self.verbose - 1)
            args.insert(0, '-' + v)
        elif self.verbose < 2:
            args.insert(0, '-q')
        if self.dry_run:
            args.insert(0, '-n')
        log.info(
            "Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
        )
        try:
            # calls the module-level run_setup (sandbox), not this method
            run_setup(setup_script, args)
        except SystemExit as v:
            raise DistutilsError("Setup script exited with %s" % (v.args[0],))
    def build_and_install(self, setup_script, setup_base):
        """Run bdist_egg for *setup_script* into a temp dist dir, then
        install every egg it produced; return the installed dists."""
        args = ['bdist_egg', '--dist-dir']

        dist_dir = tempfile.mkdtemp(
            prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
        )
        try:
            self._set_fetcher_options(os.path.dirname(setup_script))
            args.append(dist_dir)

            self.run_setup(setup_script, setup_base, args)
            all_eggs = Environment([dist_dir])
            eggs = []
            for key in all_eggs:
                for dist in all_eggs[key]:
                    eggs.append(self.install_egg(dist.location, setup_base))
            if not eggs and not self.dry_run:
                log.warn("No eggs found in %s (setup script problem?)",
                         dist_dir)
            return eggs
        finally:
            rmtree(dist_dir)
            log.set_verbosity(self.verbose)  # restore our log verbosity
def _set_fetcher_options(self, base):
"""
When easy_install is about to run bdist_egg on a source dist, that
source dist might have 'setup_requires' directives, requiring
additional fetching. Ensure the fetcher options given to easy_install
are available to that command as well.
"""
# find the fetch options from easy_install and write them out
# to the setup.cfg file.
ei_opts = self.distribution.get_option_dict('easy_install').copy()
fetch_directives = (
'find_links', 'site_dirs', 'index_url', 'optimize', 'allow_hosts',
)
fetch_options = {}
for key, val in ei_opts.items():
if key not in fetch_directives:
continue
fetch_options[key.replace('_', '-')] = val[1]
# create a settings dictionary suitable for `edit_config`
settings = dict(easy_install=fetch_options)
cfg_filename = os.path.join(base, 'setup.cfg')
setopt.edit_config(cfg_filename, settings)
def update_pth(self, dist):
if self.pth_file is None:
return
for d in self.pth_file[dist.key]: # drop old entries
if self.multi_version or d.location != dist.location:
log.info("Removing %s from easy-install.pth file", d)
self.pth_file.remove(d)
if d.location in self.shadow_path:
self.shadow_path.remove(d.location)
if not self.multi_version:
if dist.location in self.pth_file.paths:
log.info(
"%s is already the active version in easy-install.pth",
dist,
)
else:
log.info("Adding %s to easy-install.pth file", dist)
self.pth_file.add(dist) # add new entry
if dist.location not in self.shadow_path:
self.shadow_path.append(dist.location)
if not self.dry_run:
self.pth_file.save()
if dist.key == 'setuptools':
# Ensure that setuptools itself never becomes unavailable!
# XXX should this check for latest version?
filename = os.path.join(self.install_dir, 'setuptools.pth')
if os.path.islink(filename):
os.unlink(filename)
f = open(filename, 'wt')
f.write(self.pth_file.make_relative(dist.location) + '\n')
f.close()
    def unpack_progress(self, src, dst):
        # Progress filter for unpacking: log each member and keep it.
        log.debug("Unpacking %s to %s", src, dst)
        return dst  # only unpack-and-compile skips files for dry run
    def unpack_and_compile(self, egg_path, destination):
        """Extract an egg archive to *destination*, byte-compiling its
        .py files and fixing permissions on extension libraries."""
        to_compile = []
        to_chmod = []

        def pf(src, dst):
            # progress filter: record files needing post-processing
            if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
                to_compile.append(dst)
            elif dst.endswith('.dll') or dst.endswith('.so'):
                to_chmod.append(dst)
            self.unpack_progress(src, dst)
            return not self.dry_run and dst or None

        unpack_archive(egg_path, destination, pf)
        self.byte_compile(to_compile)
        if not self.dry_run:
            for f in to_chmod:
                # add read/execute for everyone; strip setuid/setgid bits
                mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
                chmod(f, mode)
    def byte_compile(self, to_compile):
        """Byte-compile the given .py files, honoring --dry-run,
        --optimize, and sys.dont_write_bytecode."""
        if sys.dont_write_bytecode:
            return

        from distutils.util import byte_compile

        try:
            # try to make the byte compile messages quieter
            log.set_verbosity(self.verbose - 1)

            byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
            if self.optimize:
                byte_compile(
                    to_compile, optimize=self.optimize, force=1,
                    dry_run=self.dry_run,
                )
        finally:
            log.set_verbosity(self.verbose)  # restore original verbosity
__no_default_msg = textwrap.dedent("""
bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://setuptools.readthedocs.io/en/latest/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again.""").lstrip()
    def no_default_version_msg(self):
        """Return the 'bad install directory or PYTHONPATH' error text,
        filled in with the configured install dir and current PYTHONPATH."""
        template = self.__no_default_msg
        return template % (self.install_dir, os.environ.get('PYTHONPATH', ''))
    def install_site_py(self):
        """Make sure there's a site.py in the target dir, if needed"""
        if self.sitepy_installed:
            return  # already did it, or don't need to

        sitepy = os.path.join(self.install_dir, "site.py")
        source = resource_string("setuptools", "site-patch.py")
        source = source.decode('utf-8')
        current = ""

        if os.path.exists(sitepy):
            log.debug("Checking existing site.py in %s", self.install_dir)
            with io.open(sitepy) as strm:
                current = strm.read()

            # refuse to clobber a site.py that we didn't generate
            if not current.startswith('def __boot():'):
                raise DistutilsError(
                    "%s is not a setuptools-generated site.py; please"
                    " remove it." % sitepy
                )

        if current != source:
            log.info("Creating %s", sitepy)
            if not self.dry_run:
                ensure_directory(sitepy)
                with io.open(sitepy, 'w', encoding='utf-8') as strm:
                    strm.write(source)
            self.byte_compile([sitepy])

        self.sitepy_installed = True
    def create_home_path(self):
        """Create directories under ~."""
        if not self.user:
            return
        home = convert_path(os.path.expanduser("~"))
        for name, path in six.iteritems(self.config_vars):
            # only create directories rooted in the user's home
            if path.startswith(home) and not os.path.isdir(path):
                self.debug_print("os.makedirs('%s', 0o700)" % path)
                os.makedirs(path, 0o700)
    # Debian-style layout selection: old Pythons and virtualenvs
    # ('real_prefix') use site-packages, otherwise dist-packages.
    # NOTE(review): sys.version[:3] only distinguishes the 2.3-2.5 cases
    # here; confirm this string-prefix test is still adequate on 3.10+.
    if sys.version[:3] in ('2.3', '2.4', '2.5') or 'real_prefix' in sys.__dict__:
        sitedir_name = 'site-packages'
    else:
        sitedir_name = 'dist-packages'

    # $-templates expanded by _expand() via distutils.util.subst_vars
    INSTALL_SCHEMES = dict(
        posix=dict(
            install_dir='$base/lib/python$py_version_short/site-packages',
            script_dir='$base/bin',
        ),
        unix_local = dict(
            install_dir = '$base/local/lib/python$py_version_short/%s' % sitedir_name,
            script_dir = '$base/local/bin',
        ),
        posix_local = dict(
            install_dir = '$base/local/lib/python$py_version_short/%s' % sitedir_name,
            script_dir = '$base/local/bin',
        ),
        deb_system = dict(
            install_dir = '$base/lib/python3/%s' % sitedir_name,
            script_dir = '$base/bin',
        ),
    )

    # fallback scheme (Windows-style paths) when os.name has no entry
    DEFAULT_SCHEME = dict(
        install_dir='$base/Lib/site-packages',
        script_dir='$base/Scripts',
    )
    def _expand(self, *attrs):
        """Expand $-variables in the named attributes, first applying
        the install scheme implied by --prefix / --install-layout."""
        config_vars = self.get_finalized_command('install').config_vars

        if self.prefix or self.install_layout:
            if self.install_layout and self.install_layout in ['deb']:
                scheme_name = "deb_system"
                self.prefix = '/usr'
            elif self.prefix or 'real_prefix' in sys.__dict__:
                scheme_name = os.name
            else:
                scheme_name = "posix_local"
            # Set default install_dir/scripts from --prefix
            config_vars = config_vars.copy()
            config_vars['base'] = self.prefix
            scheme = self.INSTALL_SCHEMES.get(scheme_name,self.DEFAULT_SCHEME)
            for attr, val in scheme.items():
                # only fill attributes the user hasn't set explicitly
                if getattr(self, attr, None) is None:
                    setattr(self, attr, val)

        from distutils.util import subst_vars

        for attr in attrs:
            val = getattr(self, attr)
            if val is not None:
                val = subst_vars(val, config_vars)
                if os.name == 'posix':
                    val = os.path.expanduser(val)
                setattr(self, attr, val)
def _pythonpath():
items = os.environ.get('PYTHONPATH', '').split(os.pathsep)
return filter(None, items)
def get_site_dirs():
    """
    Return a list of 'site' dirs: PYTHONPATH entries, per-prefix
    site-packages/dist-packages locations, user site, and anything
    site.getsitepackages() reports, all normalized.
    """

    sitedirs = []

    # start with PYTHONPATH
    sitedirs.extend(_pythonpath())

    prefixes = [sys.prefix]
    if sys.exec_prefix != sys.prefix:
        prefixes.append(sys.exec_prefix)
    for prefix in prefixes:
        if prefix:
            if sys.platform in ('os2emx', 'riscos'):
                sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
            elif os.sep == '/':
                sitedirs.extend([
                    os.path.join(
                        prefix,
                        "local/lib",
                        # build the X.Y tag from version_info, as the
                        # sibling entry below does: sys.version[:3]
                        # yields the wrong '3.1' on Python 3.10+
                        "python{}.{}".format(*sys.version_info),
                        "dist-packages",
                    ),
                    os.path.join(
                        prefix,
                        "lib",
                        "python{}.{}".format(*sys.version_info),
                        "dist-packages",
                    ),
                    os.path.join(prefix, "lib", "site-python"),
                ])
            else:
                sitedirs.extend([
                    prefix,
                    os.path.join(prefix, "lib", "site-packages"),
                ])
            if sys.platform == 'darwin':
                # for framework builds *only* we add the standard Apple
                # locations. Currently only per-user, but /Library and
                # /Network/Library could be added too
                if 'Python.framework' in prefix:
                    home = os.environ.get('HOME')
                    if home:
                        home_sp = os.path.join(
                            home,
                            'Library',
                            'Python',
                            '{}.{}'.format(*sys.version_info),
                            'site-packages',
                        )
                        sitedirs.append(home_sp)
    lib_paths = get_path('purelib'), get_path('platlib')
    for site_lib in lib_paths:
        if site_lib not in sitedirs:
            sitedirs.append(site_lib)

    if site.ENABLE_USER_SITE:
        sitedirs.append(site.USER_SITE)

    try:
        sitedirs.extend(site.getsitepackages())
    except AttributeError:
        pass

    sitedirs = list(map(normalize_path, sitedirs))

    return sitedirs
def expand_paths(inputs):
    """Yield sys.path directories that might contain "old-style" packages.

    Yields (dirname, filenames) pairs for each input directory and for
    every existing directory referenced from third-party .pth files
    found there; duplicates are suppressed.
    """
    seen = {}

    for dirname in inputs:
        dirname = normalize_path(dirname)
        if dirname in seen:
            continue

        seen[dirname] = 1
        if not os.path.isdir(dirname):
            continue

        files = os.listdir(dirname)
        yield dirname, files

        for name in files:
            if not name.endswith('.pth'):
                # We only care about the .pth files
                continue
            if name in ('easy-install.pth', 'setuptools.pth'):
                # Ignore .pth files that we control
                continue

            # Read the .pth file; the context manager guarantees the
            # handle is closed even if line processing raises
            with open(os.path.join(dirname, name)) as f:
                lines = list(yield_lines(f))

            # Yield existing non-dupe, non-import directory lines from it
            for line in lines:
                if not line.startswith("import"):
                    line = normalize_path(line.rstrip())
                    if line not in seen:
                        seen[line] = 1

                        if not os.path.isdir(line):
                            continue

                        yield line, os.listdir(line)
def extract_wininst_cfg(dist_filename):
    """Extract configuration data from a bdist_wininst .exe

    Returns a configparser.RawConfigParser, or None
    """
    f = open(dist_filename, 'rb')
    try:
        endrec = zipfile._EndRecData(f)
        if endrec is None:
            return None

        # wininst installers append a config block + bitmap before the
        # zip data; locate it relative to the zip's start offset
        prepended = (endrec[9] - endrec[5]) - endrec[6]
        if prepended < 12:  # no wininst data here
            return None
        f.seek(prepended - 12)

        tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
        if tag not in (0x1234567A, 0x1234567B):
            return None  # not a valid tag

        f.seek(prepended - (12 + cfglen))
        init = {'version': '', 'target_version': ''}
        cfg = configparser.RawConfigParser(init)
        try:
            part = f.read(cfglen)
            # Read up to the first null byte.
            config = part.split(b'\0', 1)[0]
            # Now the config is in bytes, but for RawConfigParser, it should
            #  be text, so decode it.
            config = config.decode(sys.getfilesystemencoding())
            # NOTE(review): readfp is removed in Python 3.12; kept here
            # for the six/Python 2 support path -- confirm target matrix
            cfg.readfp(six.StringIO(config))
        except configparser.Error:
            return None
        if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
            return None
        return cfg
    finally:
        f.close()
def get_exe_prefixes(exe_filename):
    """Get exe->egg path translations for a given .exe file"""
    # default wininst layout prefixes, mapped onto egg-relative paths
    prefixes = [
        ('PURELIB/', ''),
        ('PLATLIB/pywin32_system32', ''),
        ('PLATLIB/', ''),
        ('SCRIPTS/', 'EGG-INFO/scripts/'),
        ('DATA/lib/site-packages', ''),
    ]
    z = zipfile.ZipFile(exe_filename)
    try:
        for info in z.infolist():
            name = info.filename
            parts = name.split('/')
            # a bundled *.egg-info/PKG-INFO maps the whole .egg-info dir
            # onto EGG-INFO/
            if len(parts) == 3 and parts[2] == 'PKG-INFO':
                if parts[1].endswith('.egg-info'):
                    prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
                    break
            if len(parts) != 2 or not name.endswith('.pth'):
                continue
            if name.endswith('-nspkg.pth'):
                continue
            # directories named by top-level .pth files also map to the
            # egg root
            if parts[0].upper() in ('PURELIB', 'PLATLIB'):
                contents = z.read(name)
                if six.PY3:
                    contents = contents.decode()
                for pth in yield_lines(contents):
                    pth = pth.strip().replace('\\', '/')
                    if not pth.startswith('import'):
                        prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
    finally:
        z.close()
    # longest-prefix-first so the most specific translation wins
    prefixes = [(x.lower(), y) for x, y in prefixes]
    prefixes.sort()
    prefixes.reverse()
    return prefixes
class PthDistributions(Environment):
    """A .pth file with Distribution paths in it"""

    # set when in-memory state diverges from the file on disk
    dirty = False

    def __init__(self, filename, sitedirs=()):
        self.filename = filename
        self.sitedirs = list(map(normalize_path, sitedirs))
        self.basedir = normalize_path(os.path.dirname(self.filename))
        self._load()
        Environment.__init__(self, [], None, None)
        for path in yield_lines(self.paths):
            list(map(self.add, find_distributions(path, True)))

    def _load(self):
        """Parse self.filename into self.paths, dropping stale entries."""
        self.paths = []
        saw_import = False
        seen = dict.fromkeys(self.sitedirs)
        if os.path.isfile(self.filename):
            # read through a context manager so the handle is closed even
            # if a line fails to process
            with open(self.filename, 'rt') as f:
                for line in f:
                    if line.startswith('import'):
                        saw_import = True
                        continue
                    path = line.rstrip()
                    self.paths.append(path)
                    if not path.strip() or path.strip().startswith('#'):
                        continue
                    # skip non-existent paths, in case somebody deleted a
                    # package manually, and duplicate paths as well
                    path = self.paths[-1] = normalize_path(
                        os.path.join(self.basedir, path)
                    )
                    if not os.path.exists(path) or path in seen:
                        self.paths.pop()  # skip it
                        self.dirty = True  # we cleaned up, so we're dirty now :)
                        continue
                    seen[path] = 1
        if self.paths and not saw_import:
            self.dirty = True  # ensure anything we touch has import wrappers
        while self.paths and not self.paths[-1].strip():
            self.paths.pop()

    def save(self):
        """Write changed .pth file back to disk"""
        if not self.dirty:
            return

        rel_paths = list(map(self.make_relative, self.paths))
        if rel_paths:
            log.debug("Saving %s", self.filename)
            lines = self._wrap_lines(rel_paths)
            data = '\n'.join(lines) + '\n'

            if os.path.islink(self.filename):
                os.unlink(self.filename)
            with open(self.filename, 'wt') as f:
                f.write(data)

        elif os.path.exists(self.filename):
            log.debug("Deleting empty %s", self.filename)
            os.unlink(self.filename)

        self.dirty = False

    @staticmethod
    def _wrap_lines(lines):
        # hook point: RewritePthDistributions adds a prelude/postlude
        return lines

    def add(self, dist):
        """Add `dist` to the distribution map"""
        new_path = (
            dist.location not in self.paths and (
                dist.location not in self.sitedirs or
                # account for '.' being in PYTHONPATH
                dist.location == os.getcwd()
            )
        )
        if new_path:
            self.paths.append(dist.location)
            self.dirty = True
        Environment.add(self, dist)

    def remove(self, dist):
        """Remove `dist` from the distribution map"""
        while dist.location in self.paths:
            self.paths.remove(dist.location)
            self.dirty = True
        Environment.remove(self, dist)

    def make_relative(self, path):
        """Return *path* relative to the .pth file's directory when it is
        inside it; otherwise return the path unchanged."""
        npath, last = os.path.split(normalize_path(path))
        baselen = len(self.basedir)
        parts = [last]
        sep = os.altsep == '/' and '/' or os.sep
        while len(npath) >= baselen:
            if npath == self.basedir:
                parts.append(os.curdir)
                parts.reverse()
                return sep.join(parts)
            npath, last = os.path.split(npath)
            parts.append(last)
        else:
            return path
class RewritePthDistributions(PthDistributions):
    """PthDistributions variant that brackets the path entries with code
    that re-inserts them at a controlled position in sys.path."""

    @classmethod
    def _wrap_lines(cls, lines):
        yield cls.prelude
        for line in lines:
            yield line
        yield cls.postlude

    # executed first: remember how long sys.path was before our entries
    prelude = _one_liner("""
        import sys
        sys.__plen = len(sys.path)
        """)
    # executed last: move our entries to the saved insertion point
    postlude = _one_liner("""
        import sys
        new = sys.path[sys.__plen:]
        del sys.path[sys.__plen:]
        p = getattr(sys, '__egginsert', 0)
        sys.path[p:p] = new
        sys.__egginsert = p + len(new)
        """)
# Opt into the sys.path-rewriting .pth flavor via environment variable;
# the default 'raw' keeps plain one-path-per-line files.
if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite':
    PthDistributions = RewritePthDistributions
def _first_line_re():
    """
    Return a regular expression based on first_line_re suitable for matching
    strings.
    """
    pattern = first_line_re.pattern
    if not isinstance(pattern, str):
        # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern;
        # recompile it from the decoded pattern text
        return re.compile(pattern.decode())
    return first_line_re
def auto_chmod(func, arg, exc):
    """rmtree error handler: on Windows, make the path writable and retry
    the failed unlink; otherwise re-raise with the failing call appended."""
    if func in [os.unlink, os.remove] and os.name == 'nt':
        chmod(arg, stat.S_IWRITE)
        return func(arg)
    et, ev, _ = sys.exc_info()
    # NOTE(review): ev[0]/ev[1] indexing assumes Python 2 tuple-style
    # exception values; on Python 3 this raises TypeError and masks the
    # original error -- confirm the intended support matrix
    six.reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg))))
def update_dist_caches(dist_path, fix_zipimporter_caches):
    """
    Fix any globally cached `dist_path` related data

    `dist_path` should be a path of a newly installed egg distribution (zipped
    or unzipped).

    sys.path_importer_cache contains finder objects that have been cached when
    importing data from the original distribution. Any such finders need to be
    cleared since the replacement distribution might be packaged differently,
    e.g. a zipped egg distribution might get replaced with an unzipped egg
    folder or vice versa. Having the old finders cached may then cause Python
    to attempt loading modules from the replacement distribution using an
    incorrect loader.

    zipimport.zipimporter objects are Python loaders charged with importing
    data packaged inside zip archives. If stale loaders referencing the
    original distribution, are left behind, they can fail to load modules from
    the replacement distribution. E.g. if an old zipimport.zipimporter instance
    is used to load data from a new zipped egg archive, it may cause the
    operation to attempt to locate the requested data in the wrong location -
    one indicated by the original distribution's zip archive directory
    information. Such an operation may then fail outright, e.g. report having
    read a 'bad local file header', or even worse, it may fail silently &
    return invalid data.

    zipimport._zip_directory_cache contains cached zip archive directory
    information for all existing zipimport.zipimporter instances and all such
    instances connected to the same archive share the same cached directory
    information.

    If asked, and the underlying Python implementation allows it, we can fix
    all existing zipimport.zipimporter instances instead of having to track
    them down and remove them one by one, by updating their shared cached zip
    archive directory information. This, of course, assumes that the
    replacement distribution is packaged as a zipped egg.

    If not asked to fix existing zipimport.zipimporter instances, we still do
    our best to clear any remaining zipimport.zipimporter related cached data
    that might somehow later get used when attempting to load data from the new
    distribution and thus cause such load operations to fail. Note that when
    tracking down such remaining stale data, we can not catch every conceivable
    usage from here, and we clear only those that we know of and have found to
    cause problems if left alive. Any remaining caches should be updated by
    whomever is in charge of maintaining them, i.e. they should be ready to
    handle us replacing their zip archives with new distributions at runtime.

    """
    # There are several other known sources of stale zipimport.zipimporter
    # instances that we do not clear here, but might if ever given a reason to
    # do so:
    # * Global setuptools pkg_resources.working_set (a.k.a. 'master working
    # set') may contain distributions which may in turn contain their
    #   zipimport.zipimporter loaders.
    # * Several zipimport.zipimporter loaders held by local variables further
    #   up the function call stack when running the setuptools installation.
    # * Already loaded modules may have their __loader__ attribute set to the
    #   exact loader instance used when importing them. Python 3.4 docs state
    #   that this information is intended mostly for introspection and so is
    #   not expected to cause us problems.
    normalized_path = normalize_path(dist_path)
    _uncache(normalized_path, sys.path_importer_cache)
    if fix_zipimporter_caches:
        _replace_zip_directory_cache_data(normalized_path)
    else:
        # Here, even though we do not want to fix existing and now stale
        # zipimporter cache information, we still want to remove it. Related to
        # Python's zip archive directory information cache, we clear each of
        # its stale entries in two phases:
        #   1. Clear the entry so attempting to access zip archive information
        #      via any existing stale zipimport.zipimporter instances fails.
        #   2. Remove the entry from the cache so any newly constructed
        #      zipimport.zipimporter instances do not end up using old stale
        #      zip archive directory information.
        # This whole stale data removal step does not seem strictly necessary,
        # but has been left in because it was done before we started replacing
        # the zip archive directory information cache content if possible, and
        # there are no relevant unit tests that we can depend on to tell us if
        # this is really needed.
        _remove_and_clear_zip_directory_cache_data(normalized_path)
def _collect_zipimporter_cache_entries(normalized_path, cache):
    """
    Return zipimporter cache entry keys related to a given normalized path.

    Alternative path spellings (e.g. those using different character case or
    those using alternative path separators) related to the same path are
    included. Any sub-path entries are included as well, i.e. those
    corresponding to zip archives embedded in other zip archives.
    """
    prefix_len = len(normalized_path)

    def _related(entry):
        # a key matches when its normalized form is the path itself or a
        # sub-path beneath it
        np = normalize_path(entry)
        return (np.startswith(normalized_path)
                and np[prefix_len:prefix_len + 1] in (os.sep, ''))

    return [p for p in cache if _related(p)]
def _update_zipimporter_cache(normalized_path, cache, updater=None):
    """
    Update zipimporter cache data for a given normalized path.

    Any sub-path entries are processed as well, i.e. those corresponding to zip
    archives embedded in other zip archives.

    Given updater is a callable taking a cache entry key and the original entry
    (after already removing the entry from the cache), and expected to update
    the entry and possibly return a new one to be inserted in its place.
    Returning None indicates that the entry should not be replaced with a new
    one. If no updater is given, the cache entries are simply removed without
    any additional processing, the same as if the updater simply returned None.

    """
    for p in _collect_zipimporter_cache_entries(normalized_path, cache):
        # N.B. pypy's custom zipimport._zip_directory_cache implementation does
        # not support the complete dict interface:
        # * Does not support item assignment, thus not allowing this function
        #    to be used only for removing existing cache entries.
        #  * Does not support the dict.pop() method, forcing us to use the
        #    get/del patterns instead. For more detailed information see the
        #    following links:
        # https://github.com/pypa/setuptools/issues/202#issuecomment-202913420
        # http://bit.ly/2h9itJX
        old_entry = cache[p]
        del cache[p]
        # updater may return a replacement entry; None leaves it removed
        new_entry = updater and updater(p, old_entry)
        if new_entry is not None:
            cache[p] = new_entry
def _uncache(normalized_path, cache):
    """Remove all cache entries related to *normalized_path* (incl. sub-paths)."""
    _update_zipimporter_cache(normalized_path, cache)
def _remove_and_clear_zip_directory_cache_data(normalized_path):
    """Clear and remove zipimport directory cache entries under *normalized_path*."""
    def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
        # Empty the (possibly shared) entry in place; returning None tells
        # _update_zipimporter_cache to leave the entry removed.
        old_entry.clear()

    _update_zipimporter_cache(
        normalized_path, zipimport._zip_directory_cache,
        updater=clear_and_remove_cached_zip_archive_directory_data)
# PyPy Python implementation does not allow directly writing to the
# zipimport._zip_directory_cache and so prevents us from attempting to correct
# its content. The best we can do there is clear the problematic cache content
# and have PyPy repopulate it as needed. The downside is that if there are any
# stale zipimport.zipimporter instances laying around, attempting to use them
# will fail due to not having its zip archive directory information available
# instead of being automatically corrected to use the new correct zip archive
# directory information.
if '__pypy__' in sys.builtin_module_names:
    _replace_zip_directory_cache_data = \
        _remove_and_clear_zip_directory_cache_data
else:
    def _replace_zip_directory_cache_data(normalized_path):
        """Refresh cached zip directory data for entries under *normalized_path*."""
        def replace_cached_zip_archive_directory_data(path, old_entry):
            # N.B. In theory, we could load the zip directory information just
            # once for all updated path spellings, and then copy it locally and
            # update its contained path strings to contain the correct
            # spelling, but that seems like a way too invasive move (this cache
            # structure is not officially documented anywhere and could in
            # theory change with new Python releases) for no significant
            # benefit.
            old_entry.clear()
            # Instantiating a zipimporter repopulates the global cache entry
            # for `path`; copy that fresh data back into the original dict.
            zipimport.zipimporter(path)
            old_entry.update(zipimport._zip_directory_cache[path])
            return old_entry

        _update_zipimporter_cache(
            normalized_path, zipimport._zip_directory_cache,
            updater=replace_cached_zip_archive_directory_data)
def is_python(text, filename='<string>'):
    """
    Return True if *text* compiles as a Python script.

    *filename* is used only for error reporting inside ``compile()``.
    """
    try:
        compile(text, filename, 'exec')
    except (SyntaxError, TypeError, ValueError):
        # ValueError added: compile() raises it (not SyntaxError) for
        # source strings containing null bytes, which previously escaped
        # this check and crashed the caller.
        return False
    else:
        return True
def is_sh(executable):
    """Determine if the specified executable is a .sh (contains a #! line)"""
    try:
        with io.open(executable, encoding='latin-1') as stream:
            magic_bytes = stream.read(2)
    except (OSError, IOError):
        # Unreadable file: preserve the historical behavior of returning
        # the (truthy) path instead of a boolean.
        return executable
    return magic_bytes == '#!'
def nt_quote_arg(arg):
    """Quote a command line argument according to Windows parsing rules"""
    # list2cmdline applies the MS C runtime quoting rules to each element.
    return subprocess.list2cmdline([arg])
def is_python_script(script_text, filename):
    """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.)"""
    if filename.endswith(('.py', '.pyw')):
        # The extension alone marks it as Python.
        return True
    if is_python(script_text, filename):
        # It's syntactically valid Python source.
        return True
    if script_text.startswith('#!'):
        # It begins with a '#!' line, so check if 'python' is in it somewhere.
        return 'python' in script_text.splitlines()[0].lower()
    # Not any Python I can recognize.
    return False
try:
    from os import chmod as _chmod
except ImportError:
    # Jython compatibility: os.chmod may not exist there, so fall back to a
    # no-op stand-in that callers can invoke unconditionally.
    def _chmod(*args):
        pass
def chmod(path, mode):
    """Best-effort chmod: failures are logged at debug level, never raised."""
    log.debug("changing mode of %s to %o", path, mode)
    try:
        _chmod(path, mode)
    except os.error as e:
        log.debug("chmod failed: %s", e)
class CommandSpec(list):
    """
    A command spec for a #! header, specified as a list of arguments akin to
    those passed to Popen.
    """

    options = []
    split_args = dict()

    @classmethod
    def best(cls):
        """
        Choose the best CommandSpec class based on environmental conditions.
        """
        return cls

    @classmethod
    def _sys_executable(cls):
        # Prefer the venv launcher override when it is set.
        fallback = os.path.normpath(sys.executable)
        return os.environ.get('__PYVENV_LAUNCHER__', fallback)

    @classmethod
    def from_param(cls, param):
        """
        Construct a CommandSpec from a parameter to build_scripts, which may
        be None.
        """
        if isinstance(param, cls):
            return param
        if isinstance(param, list):
            return cls(param)
        if param is None:
            return cls.from_environment()
        # otherwise, assume it's a string.
        return cls.from_string(param)

    @classmethod
    def from_environment(cls):
        return cls([cls._sys_executable()])

    @classmethod
    def from_string(cls, string):
        """
        Construct a command spec from a simple string representing a command
        line parseable by shlex.split.
        """
        return cls(shlex.split(string, **cls.split_args))

    def install_options(self, script_text):
        # Pull interpreter options out of the script's own #! line; prepend
        # '-x' when the resulting command line is not pure ASCII.
        self.options = shlex.split(self._extract_options(script_text))
        cmdline = subprocess.list2cmdline(self)
        if not isascii(cmdline):
            self.options[:0] = ['-x']

    @staticmethod
    def _extract_options(orig_script):
        """
        Extract any options from the first line of the script.
        """
        first_line = (orig_script + '\n').splitlines()[0]
        match = _first_line_re().match(first_line)
        options = match.group(1) or '' if match else ''
        return options.strip()

    def as_header(self):
        return self._render(self + list(self.options))

    @staticmethod
    def _strip_quotes(item):
        # Drop one matching pair of surrounding quotes, if present.
        for quote in ('"', "'"):
            if item.startswith(quote) and item.endswith(quote):
                return item[1:-1]
        return item

    @staticmethod
    def _render(items):
        cleaned = (CommandSpec._strip_quotes(entry.strip()) for entry in items)
        return '#!' + subprocess.list2cmdline(cleaned) + '\n'
# For pbr compat; will be removed in a future version.
# Module-level snapshot of the interpreter path, kept only so external
# callers importing `sys_executable` keep working.
sys_executable = CommandSpec._sys_executable()
class WindowsCommandSpec(CommandSpec):
    # Windows command lines are not POSIX: keep backslashes and quoting
    # intact when splitting with shlex.
    split_args = dict(posix=False)
class ScriptWriter:
    """
    Encapsulates behavior around writing entry point scripts for console and
    gui apps.
    """

    # Source of the generated stub script; %-interpolated with the
    # requirement spec, entry point group, and entry point name.
    template = textwrap.dedent(r"""
        # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
        __requires__ = %(spec)r
        import re
        import sys
        from pkg_resources import load_entry_point

        if __name__ == '__main__':
            sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
            sys.exit(
                load_entry_point(%(spec)r, %(group)r, %(name)r)()
            )
    """).lstrip()

    command_spec_class = CommandSpec

    @classmethod
    def get_script_args(cls, dist, executable=None, wininst=False):
        # for backward compatibility
        warnings.warn("Use get_args", EasyInstallDeprecationWarning)
        writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
        header = cls.get_script_header("", executable, wininst)
        return writer.get_args(dist, header)

    @classmethod
    def get_script_header(cls, script_text, executable=None, wininst=False):
        # for backward compatibility
        warnings.warn("Use get_header", EasyInstallDeprecationWarning, stacklevel=2)
        if wininst:
            executable = "python.exe"
        return cls.get_header(script_text, executable)

    @classmethod
    def get_args(cls, dist, header=None):
        """
        Yield write_script() argument tuples for a distribution's
        console_scripts and gui_scripts entry points.
        """
        if header is None:
            header = cls.get_header()
        spec = str(dist.as_requirement())
        for type_ in 'console', 'gui':
            group = type_ + '_scripts'
            for name, ep in dist.get_entry_map(group).items():
                cls._ensure_safe_name(name)
                # locals() supplies spec/group/name for template interpolation.
                script_text = cls.template % locals()
                args = cls._get_script_args(type_, name, header, script_text)
                for res in args:
                    yield res

    @staticmethod
    def _ensure_safe_name(name):
        """
        Prevent paths in *_scripts entry point names.
        """
        has_path_sep = re.search(r'[\\/]', name)
        if has_path_sep:
            raise ValueError("Path separators not allowed in script names")

    @classmethod
    def get_writer(cls, force_windows):
        # for backward compatibility
        warnings.warn("Use best", EasyInstallDeprecationWarning)
        return WindowsScriptWriter.best() if force_windows else cls.best()

    @classmethod
    def best(cls):
        """
        Select the best ScriptWriter for this environment.
        """
        if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
            return WindowsScriptWriter.best()
        else:
            return cls

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        # Simply write the stub with no extension.
        yield (name, header + script_text)

    @classmethod
    def get_header(cls, script_text="", executable=None):
        """Create a #! line, getting options (if any) from script_text"""
        cmd = cls.command_spec_class.best().from_param(executable)
        cmd.install_options(script_text)
        return cmd.as_header()
class WindowsScriptWriter(ScriptWriter):
    command_spec_class = WindowsCommandSpec

    @classmethod
    def get_writer(cls):
        # for backward compatibility
        warnings.warn("Use best", EasyInstallDeprecationWarning)
        return cls.best()

    @classmethod
    def best(cls):
        """
        Select the best ScriptWriter suitable for Windows
        """
        writer_lookup = dict(
            executable=WindowsExecutableLauncherWriter,
            natural=cls,
        )
        # for compatibility, use the executable launcher by default
        launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
        return writer_lookup[launcher]

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        "For Windows, add a .py extension"
        # NOTE(review): console scripts get the unusual '.pya' extension
        # here (not '.py'); the PATHEXT warning below covers it -- confirm
        # this is intentional before changing.
        ext = dict(console='.pya', gui='.pyw')[type_]
        # PATHEXT lookup assumes a Windows environment; raises KeyError when
        # the variable is unset.
        if ext not in os.environ['PATHEXT'].lower().split(';'):
            msg = (
                "{ext} not listed in PATHEXT; scripts will not be "
                "recognized as executables."
            ).format(**locals())
            warnings.warn(msg, UserWarning)
        old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
        old.remove(ext)
        header = cls._adjust_header(type_, header)
        # Pre-existing files with any superseded extension block installation.
        blockers = [name + x for x in old]
        yield name + ext, header + script_text, 't', blockers

    @classmethod
    def _adjust_header(cls, type_, orig_header):
        """
        Make sure 'pythonw' is used for gui and 'python' is used for
        console (regardless of what sys.executable is).
        """
        pattern = 'pythonw.exe'
        repl = 'python.exe'
        if type_ == 'gui':
            # For gui scripts swap the direction: python.exe -> pythonw.exe.
            pattern, repl = repl, pattern
        pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
        new_header = pattern_ob.sub(string=orig_header, repl=repl)
        return new_header if cls._use_header(new_header) else orig_header

    @staticmethod
    def _use_header(new_header):
        """
        Should _adjust_header use the replaced header?
        On non-windows systems, always use. On
        Windows systems, only use the replaced header if it resolves
        to an executable on the system.
        """
        # Strip the '#!' prefix, trailing newline, and surrounding quotes.
        clean_header = new_header[2:-1].strip('"')
        return sys.platform != 'win32' or find_executable(clean_header)
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        """
        For Windows, add a .py extension and an .exe launcher
        """
        if type_ == 'gui':
            launcher_type = 'gui'
            ext = '-script.pyw'
            old = ['.pyw']
        else:
            launcher_type = 'cli'
            ext = '-script.py'
            old = ['.py', '.pyc', '.pyo']
        hdr = cls._adjust_header(type_, header)
        blockers = [name + x for x in old]
        # The actual script body, written next to the .exe launcher.
        yield (name + ext, hdr + script_text, 't', blockers)
        yield (
            name + '.exe', get_win_launcher(launcher_type),
            'b'  # write in binary mode
        )
        if not is_64bit():
            # install a manifest for the launcher to prevent Windows
            # from detecting it as an installer (which it will for
            # launchers like easy_install.exe). Consider only
            # adding a manifest for launchers detected as installers.
            # See Distribute #143 for details.
            m_name = name + '.exe.manifest'
            yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility
# Module-level aliases for callers that predate the ScriptWriter API.
get_script_args = ScriptWriter.get_script_args
get_script_header = ScriptWriter.get_script_header
def get_win_launcher(type):
    """
    Load the Windows launcher (executable) suitable for launching a script.

    `type` should be either 'cli' or 'gui'

    Returns the executable as a byte string.
    """
    # NOTE: `type` shadows the builtin, but the parameter name is part of
    # the public signature and is kept for compatibility.
    bits_suffix = "-64." if is_64bit() else "-32."
    launcher_fn = ('%s.exe' % type).replace(".", bits_suffix)
    return resource_string('setuptools', launcher_fn)
def load_launcher_manifest(name):
    """Return the launcher manifest XML, %-interpolated with *name*."""
    manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
    if six.PY2:
        return manifest % vars()
    else:
        # On Python 3 the resource is bytes; decode before interpolating.
        return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
    """Remove a directory tree; errors go to *onerror* (auto_chmod by default)."""
    return shutil.rmtree(path, ignore_errors, onerror)
def current_umask():
    """Return the process umask without permanently changing it."""
    # os.umask both sets and returns the previous mask, so set a throwaway
    # value and immediately restore what was there before.
    previous = os.umask(0o022)
    os.umask(previous)
    return previous
def bootstrap():
    # This function is called when setuptools*.egg is run using /bin/sh
    import setuptools

    # Point argv at the egg itself and run easy_install on it.
    argv0 = os.path.dirname(setuptools.__path__[0])
    sys.argv[0] = argv0
    sys.argv.append(argv0)
    main()
def main(argv=None, **kw):
    """Console entry point: run the easy_install command via setup()."""
    from setuptools import setup
    from setuptools.dist import Distribution

    class DistributionWithoutHelpCommands(Distribution):
        # Suppress the generic distutils usage banner.
        common_usage = ""

        def _show_help(self, *args, **kw):
            with _patch_usage():
                Distribution._show_help(self, *args, **kw)

    if argv is None:
        argv = sys.argv[1:]

    with _patch_usage():
        setup(
            script_args=['-q', 'easy_install', '-v'] + argv,
            script_name=sys.argv[0] or 'easy_install',
            distclass=DistributionWithoutHelpCommands,
            **kw
        )
@contextlib.contextmanager
def _patch_usage():
    """Temporarily replace distutils' usage text with easy_install's own."""
    import distutils.core
    USAGE = textwrap.dedent("""
        usage: %(script)s [options] requirement_or_url ...
           or: %(script)s --help
        """).lstrip()

    def gen_usage(script_name):
        return USAGE % dict(
            script=os.path.basename(script_name),
        )

    saved = distutils.core.gen_usage
    distutils.core.gen_usage = gen_usage
    try:
        yield
    finally:
        # Always restore the original generator, even if the body raises.
        distutils.core.gen_usage = saved
class EasyInstallDeprecationWarning(SetuptoolsDeprecationWarning):
    """
    Class for warning about deprecations in EasyInstall in SetupTools.
    Not ignored by default, unlike DeprecationWarning.
    """
| etherkit/OpenBeacon2 | client/linux-x86/venv/lib/python3.8/site-packages/setuptools/command/easy_install.py | Python | gpl-3.0 | 89,903 | [
"VisIt"
] | d2563c020c5efbb22032dc60c45b8e63536b0e507339496908ab2f6b25e54d86 |
"""
Enterprise Login tests
"""
from datetime import datetime
from bok_choy.web_app_test import WebAppTest
from regression.pages import LOGIN_EMAIL, LOGIN_PASSWORD
from regression.pages.enterprise.course_about_page import CourseAboutPageExtended
from regression.pages.enterprise.ent_course_enrollment_page import EnterpriseCourseEnrollment
from regression.pages.enterprise.ent_data_sharing_consent_page import EnterpriseDataSharingConsentPage
from regression.pages.enterprise.ent_edx_login_page import EnterpriseEdxLogin
from regression.pages.enterprise.ent_edx_registration_page import EnterpriseEdxRegistration
from regression.pages.enterprise.ent_portal_course_page import (
EnterprisePortalCourseStart,
EnterprisePortalCourseStructure
)
from regression.pages.enterprise.ent_portal_home_page import EnterprisePortalHome
from regression.pages.enterprise.ent_portal_login_page import EnterprisePortalLogin
from regression.pages.enterprise.enterprise_const import (
ENT_COURSE_TITLE,
ENT_PORTAL_PASSWORD,
ENT_PORTAL_USERNAME,
ENTERPRISE_NAME,
IDP_CSS_ID
)
from regression.pages.enterprise.user_account import UserAccountSettings
from regression.pages.lms import LMS_BASE_URL, LMS_PROTOCOL
from regression.pages.lms.dashboard_lms import DashboardPageExtended
from regression.pages.lms.login_lms import LmsLogin
from regression.pages.lms.track_selection_page import TrackSelectionPage
from regression.pages.whitelabel.basket_page import CyberSourcePage, SingleSeatBasketPage
from regression.pages.whitelabel.const import BILLING_INFO, CARD_HOLDER_INFO
from regression.pages.whitelabel.courses_page import CoursesPage
from regression.pages.whitelabel.ecommerce_courses_page import EcommerceCoursesPage
from regression.pages.whitelabel.receipt_page import ReceiptPage
from regression.tests.helpers.api_clients import LmsApiClient, LmsLoginApi, LogoutApi
from regression.tests.helpers.utils import get_random_credentials
class EnterpriseTestBase(WebAppTest):
    """
    Base class for Enterprise login tests.

    Wires up every page object used by the enterprise flows and provides
    shared login, registration, consent, and payment helpers.
    """

    def setUp(self):
        """
        Initialize all page objects
        """
        super().setUp()
        self.browser.maximize_window()
        self.ent_portal_login = EnterprisePortalLogin(self.browser)
        self.ent_portal_home = EnterprisePortalHome(self.browser)
        self.ent_portal_course_start = \
            EnterprisePortalCourseStart(self.browser)
        self.ent_portal_course_structure = \
            EnterprisePortalCourseStructure(self.browser)
        self.ent_course_enrollment = \
            EnterpriseCourseEnrollment(self.browser)
        self.ent_data_sharing_consent = \
            EnterpriseDataSharingConsentPage(self.browser)
        self.ecommerce_courses_page = \
            EcommerceCoursesPage(self.browser)
        self.lms_login = LmsLogin(self.browser)
        self.ent_edx_registration = EnterpriseEdxRegistration(self.browser)
        self.ent_edx_login = EnterpriseEdxLogin(self.browser)
        self.dashboard = DashboardPageExtended(self.browser)
        self.courses_page = CoursesPage(self.browser)
        self.course_about_page = CourseAboutPageExtended(self.browser)
        self.track_selection_page = TrackSelectionPage(self.browser)
        self.user_account = UserAccountSettings(self.browser)
        self.cyber_source_page = CyberSourcePage(self.browser)
        self.single_seat_basket = SingleSeatBasketPage(self.browser)
        self.receipt_page = ReceiptPage(self.browser)
        self.lms_api_client = LmsApiClient()
        self.login_api = LmsLoginApi()
        self.logout_api = LogoutApi()

    def unlink_account(self):
        """
        Unlink IDP Account

        This serves as a fixture for unlinked user test case, it unlinks the
        user after running the tests to make sure that the precondition
        of test is true
        """
        # Visit account setting page
        self.user_account.visit()
        self.user_account.switch_account_settings_tabs('accounts-tab')
        # If linked account is found, unlink it
        if self.user_account.is_idp_account_linked(IDP_CSS_ID):
            self.user_account.unlink_idp_account(IDP_CSS_ID)
        # Logout using api
        self.logout_from_lms_using_api()

    def add_recovery_email(self, email):
        """
        Add secondary email address for enterprise learner
        """
        self.user_account.visit()
        self.user_account.fill_secondary_email_field(email)

    def login_to_ent_portal(self, ent_portal_username, ent_portal_password):
        """
        Login to enterprise portal and find the course and click on it
        """
        # Open portal
        self.ent_portal_login.visit()
        # Login
        self.ent_portal_login.login_to_portal(
            ent_portal_username,
            ent_portal_password)
        self.ent_portal_home.wait_for_page()

    def access_course(self):
        """
        Access the course from portal
        """
        # Open the course pop up and look for the desired course
        self.ent_portal_home.open_courses_popup()
        course_titles = self.ent_portal_home.fetch_course_titles_list()
        # Bug fix: the original passed a bare generator expression to
        # assertTrue(), which is always truthy and therefore never failed.
        # any() performs the intended "expected title is present" check.
        self.assertTrue(any(
            ENT_COURSE_TITLE in course_title
            for course_title in course_titles
        ))
        # Go to course page and then use the link there to go to edX
        self.ent_portal_home.open_enterprise_course_page(
            ENT_COURSE_TITLE
        )
        self.ent_portal_course_start.wait_for_page()
        self.ent_portal_course_start.start_or_continue_course()
        self.ent_portal_course_structure.wait_for_page()
        self.ent_portal_course_structure.open_course_on_edx()
        # Get handle of newly opened edx window and switch control to it.
        # NOTE(review): switch_to_window is deprecated in Selenium 4 in
        # favor of driver.switch_to.window -- kept for compatibility.
        edx_window = self.driver.window_handles[1]
        self.driver.switch_to_window(edx_window)

    def login_ent_edx_user(self):
        """
        Login the user using edX customized logistration page
        """
        # edx credentials
        self.ent_edx_login.wait_for_page()
        self.assertEqual(
            ENTERPRISE_NAME,
            self.ent_edx_login.get_enterprise_name()
        )
        self.ent_edx_login.login(
            LOGIN_EMAIL,
            LOGIN_PASSWORD
        )

    def register_ent_edx_user(self):
        """
        Register the enterprise user using edX customized logistration page
        """
        __, email = get_random_credentials()
        self.ent_edx_registration.visit()
        self.assertEqual(
            ENTERPRISE_NAME,
            self.ent_edx_registration.get_enterprise_name()
        )
        self.ent_edx_registration.register(
            email=email,
            full_name='Enterprise Test User',
            country="US"
        )

    def logout_from_lms_using_api(self):
        """
        Get cookies from browser and send these cookie to python request to
        logout using api
        """
        self.logout_api.logout_url = '{}://{}/{}'.format(
            LMS_PROTOCOL,
            LMS_BASE_URL,
            'logout'
        )
        self.logout_api.cookies = self.browser.get_cookies()
        self.logout_api.logout()

    def login_user_lms_using_api(self):
        """
        Login user to LMS using login API
        """
        self.login_api.authenticate(self.browser)

    def login_and_go_to_course_enrollment_page(self):
        """
        Flow which covers the user login on enterprise portal, selecting the
        course and then login to edx course enrollment page
        """
        # The edX site is visited just to make sure that when user jumps to
        # edX from portal we don't have to handle authentication popup
        self.lms_login.visit()
        # Enterprise portal flow
        self.login_to_ent_portal(
            ENT_PORTAL_USERNAME,
            ENT_PORTAL_PASSWORD)
        self.access_course()
        self.login_ent_edx_user()
        # Verify that user is on course enrollment page
        self.ent_course_enrollment.wait_for_page()

    def register_and_go_to_course_enrollment_page(self):
        """
        Flow which covers the user login on enterprise portal, selecting the
        course and then register to edx course enrollment page
        """
        # The edX site is visited just to make sure that when user jumps to
        # edX from portal we don't have to handle authentication popup
        self.lms_login.visit()
        # Enterprise portal flow
        self.login_to_ent_portal(
            ENT_PORTAL_USERNAME,
            ENT_PORTAL_PASSWORD)
        self.access_course()
        self.ent_edx_login.wait_for_page()
        self.register_ent_edx_user()
        # Verify that user is on course enrollment page
        self.ent_course_enrollment.wait_for_page()

    def payment_using_cyber_source(self):
        """
        Make payment for course by providing Billing Info and Payment details
        in respected areas.
        """
        self.cyber_source_page.set_card_holder_info(CARD_HOLDER_INFO)
        self.cyber_source_page.set_billing_info(BILLING_INFO)
        self.cyber_source_page.click_payment_button()
        self.receipt_page.wait_for_page()

    def register_edx_user(self):
        """
        Register the user using edX registration page
        """
        username, email = get_random_credentials()
        self.ent_edx_registration.visit()
        self.ent_edx_registration.register(
            email=email,
            full_name='Test User',
            username=username,
            password='test123test',
            country="US"
        )
        self.dashboard.wait_for_page()

    def verify_info_is_populated_on_basket(self, discounted_price):
        """
        After User accept data sharing consent from landing page
        verify that following information is
        displayed correctly on basket page:
        i) Enterprise offer is applied
        ii) Discounted amount

        Arguments:
            discounted_price(float): Discounted price of the course.
        """
        self.assertTrue(self.single_seat_basket.is_offer_applied())
        self.assertEqual(
            self.single_seat_basket.total_price_after_discount,
            discounted_price
        )
        self.payment_using_cyber_source()

    def verify_receipt_info_for_discounted_course(self):
        """
        Verify that info on receipt page is correct.
        Verify
        i) Course title.
        ii) Order date
        """
        self.assertIn(ENT_COURSE_TITLE, self.receipt_page.order_desc)
        self.assertEqual(
            datetime.utcnow().strftime("%Y-%m-%d"),
            self.receipt_page.order_date
        )
"VisIt"
] | a78657d9a5c5d994cd86fe2c0d7289450529f63571cafb6d5b5a77b4fae3be25 |
#!/usr/bin/env python
########################################################################
# $HeadURL: svn+ssh://svn.cern.ch/reps/dirac/DIRAC/trunk/DIRAC/WorkloadManagementSystem/private/DIRACPilotDirector.py $
# File : test_wrapperScript.py
# Author : Ricardo Graciani
########################################################################
#
# Testing DIRAC.Resources.Computing.Pilot.writeScript
# Example:
# python test_wrapperScript.py | tee script.py && chmod +x script.py && ./script.py
#
__RCSID__ = "$Id: DIRACPilotDirector.py 28536 2010-09-23 06:08:40Z rgracian $"
from DIRAC.Core.Base import Script
from DIRAC.FrameworkSystem.Client.ProxyGeneration import CLIParams, generateProxy
from DIRAC.Core.Security.Locations import getProxyLocation
from DIRAC.Core.Security.X509Chain import X509Chain
Script.disableCS()
Script.parseCommandLine()
proxyFile = getProxyLocation()
if not proxyFile:
retVal = generateProxy(CLIParams())
if not retVal['OK']:
proxy = None
else:
proxy = X509Chain()
proxy.loadChainFromFile(retVal['Value'])
else:
proxy = X509Chain()
proxy.loadChainFromFile(proxyFile)
from DIRAC.Resources.Computing import Pilot
import os
pilotFile = Pilot.__file__
print Pilot.wrapperScript( 'python' ,
arguments=['-c','import Pilot,os;print Pilot.__file__;print os.getcwd();os.system("ls -la")'],
proxy = proxy,
sandboxDict = {'test.py':'test.py', os.path.basename( pilotFile ) : pilotFile },
environDict = {'HOME': '/tmp'},
execDir='$HOME' )
| Sbalbp/DIRAC | Resources/Computing/test/test_writeScript.py | Python | gpl-3.0 | 1,628 | [
"DIRAC"
] | aec2131cb1f1d9db8dd97ae14bae9cac92f8bebfc71268178c919fbd42762daf |
import pandas as pd
import matplotlib as mpl
mpl.use('Agg') # drop tkinter dependecy
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.dates as dates
from time import time, mktime
from datetime import datetime
from sqlalchemy import create_engine, select as sqlselect, between
from sqlalchemy import and_
from sqlalchemy.orm import sessionmaker
from sql_declaration import Log, Base
# Create db session
# Bind the declarative base to a local SQLite file and open one shared
# module-level session.
engine = create_engine('sqlite:///admin_log.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
def date_to_unix(hour, day, month, year):
    """Convert a local date/hour to a Unix timestamp (seconds since epoch)."""
    moment = datetime(year, month, day, hour)
    return mktime(moment.timetuple())
def calculate_end(begin, length):  # length in hours
    """Return the timestamp *length* hours after *begin* (both in seconds)."""
    return begin + length * 60 * 60
def select_data(begin, end, person=None):
    """
    Load log rows with timestamps in [begin, end] as a pandas DataFrame.

    When *person* is given (truthy), restrict rows to that userid.
    """
    if person:
        expression = sqlselect([Log]).where(and_(between(Log.timestamp, begin, end), Log.userid == person))
        data = pd.read_sql(expression, engine)
        return data
    else:
        expression = sqlselect([Log]).where(between(Log.timestamp, begin, end))
        return pd.read_sql(expression, engine)
def create_graph(begin, length, graphtype, person=None):
    """
    Plot summed 'ingame' counts between *begin* and begin + *length* hours
    and save the figure to plot.png.

    Returns True on success, False when there was nothing to plot.

    NOTE(review): *graphtype* is currently unused -- confirm whether it is
    reserved for future graph styles or dead.
    """
    end = calculate_end(begin, length)
    data = select_data(begin, end, person)
    data = data[['timestamp', 'ingame']]
    data['timestamp'] = pd.to_datetime(data.timestamp, unit='s')
    # Collapse duplicate timestamps by summing the ingame flags.
    data = data.groupby('timestamp').sum().reset_index()
    data['ingame'] = data['ingame'].astype(int)
    try:
        plt.figure()
        data.plot(x='timestamp', y='ingame', drawstyle="steps", color='r', legend=False)
    except TypeError:
        return False  # probably no data to plot
    ax = plt.gca()
    ax.yaxis.grid(which="major", color='#dddddd', linestyle='--', linewidth=1)  # horizontal gridlines
    ax.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))  # show y axis as ints
    ax.spines['top'].set_visible(False)  # remove top frame
    ax.spines['right'].set_visible(False)  # remove right frame
    # time formatting
    xtick_locator = dates.AutoDateLocator()
    xtick_formatter = dates.AutoDateFormatter(xtick_locator)
    ax.xaxis.set_major_locator(xtick_locator)
    ax.xaxis.set_major_formatter(xtick_formatter)
    xax = ax.get_xaxis()  # get the x-axis
    adf = xax.get_major_formatter()  # the auto-formatter
    adf.scaled[1 / (24. * 60.)] = '%H:%M'  # set the < 1d scale
    adf.scaled[1. / 24] = '%d/%m %H:%M'  # set the > 1d < 1m scale
    adf.scaled[1.0] = '%Y-%m-%d'  # set the > 1dm < 1y scale
    adf.scaled[30.] = '%Y-%m'  # set the > 1Y scale
    adf.scaled[365.] = '%Y'
    begin = data['timestamp'].iloc[0].strftime("%H:%M, %B %d %Y")  # formatted starting date
    end = data['timestamp'].iloc[-1].strftime("%H:%M, %B %d %Y")  # formatted ending date
    plt.title(f'{begin} - {end}', loc='right')  # set dates as title
    plt.xlabel('')  # remove label x axis
    plt.ylim(ymin=0)  # always start at 0
    plt.savefig('plot.png', bbox_inches='tight')
    return True
def main():
    """Interactively prompt for a date range and render the graph."""
    print("Start date:")
    day = int(input("DD > "))
    month = int(input("MM > "))
    year = int(input("YY > "))
    begin = date_to_unix(0, day, month, year)
    length = int(input("Length (days) > "))
    graphtype = input("Type of graph > ")
    # Length is entered in days but create_graph expects hours.
    create_graph(begin, length * 24, graphtype)
if __name__ == '__main__':
main()
| qtto/big-brother | plot.py | Python | mit | 3,440 | [
"ADF"
] | 49115467faf2f33192157496963810fced32de4cd9b394ff0dce25afcb9313d8 |
# Copyright (C) 2014
# Pierre de Buyl
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*******************************************************
**espressopp.interaction.LennardJones93Wall**
*******************************************************
This class defines a Lennard-Jones 9-3 SingleParticlePotential in the direction x.
.. math:: V(r) = \epsilon \left( \left(\frac{\sigma}{r}\right)^9 - \left(\frac{\sigma}{r}\right)^3 \right)
where :math:`r` is the distance from the lower or upper wall in the x
direction. :math:`V(r)=0` after a distance `sigmaCutoff`.
The parameters have to be defined for every species present in the system with
`setParams` and can be retrieved with `getParams`.
Example:
>>> LJ93 = espressopp.interaction.LennardJones93Wall()
>>> LJ93.setParams(0, 6., 1., wall_cutoff)
>>> SPLJ93 = espressopp.interaction.SingleParticleLennardJones93Wall(system, LJ93)
>>> system.addInteraction(SPLJ93)
.. function:: espressopp.interaction.LennardJones93Wall()
.. function:: espressopp.interaction.LennardJones93Wall.getParams(type_var)
:param type_var:
:type type_var:
:rtype:
.. function:: espressopp.interaction.LennardJones93Wall.setParams(type_var, epsilon, sigma, sigmaCutoff, r0)
:param type_var:
:param epsilon:
:param sigma:
:param sigmaCutoff:
:param r0:
:type type_var:
:type epsilon:
:type sigma:
:type sigmaCutoff:
:type r0:
.. function:: espressopp.interaction.SingleParticleLennardJones93Wall(system, potential)
:param system:
:param potential:
:type system:
:type potential:
.. function:: espressopp.interaction.SingleParticleLennardJones93Wall.setPotential(potential)
:param potential:
:type potential:
"""
from espressopp import pmi
from espressopp.esutil import *
from espressopp.interaction.SingleParticlePotential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_LennardJones93Wall, interaction_SingleParticleLennardJones93Wall
class LennardJones93WallLocal(SingleParticlePotentialLocal, interaction_LennardJones93Wall):
    """Local (per-CPU) wrapper around the C++ LennardJones93Wall potential."""

    def __init__(self):
        # Construct the C++ object only on worker CPUs (or when PMI is
        # inactive), mirroring the other wrappers in this module.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_LennardJones93Wall)

    def getParams(self, type_var):
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.getParams(self, type_var)

    def setParams(self, type_var, epsilon, sigma, sigmaCutoff, r0):
        # NOTE(review): unlike __init__/getParams above, this call is not
        # guarded by the PMI worker-group check -- confirm this is intended.
        self.cxxclass.setParams(self, type_var, epsilon, sigma, sigmaCutoff, r0)
class SingleParticleLennardJones93WallLocal(InteractionLocal, interaction_SingleParticleLennardJones93Wall):
    """Local (per-CPU) interaction applying a LennardJones93Wall potential."""

    def __init__(self, system, potential):
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_SingleParticleLennardJones93Wall, system, potential)

    def setPotential(self, potential):
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, potential)
if pmi.isController:
    # Controller-side PMI proxies: forward the calls listed in `pmicall`
    # to the Local counterparts on the worker CPUs.
    class LennardJones93Wall(SingleParticlePotential):
        'The LennardJones93Wall potential.'
        # NOTE(review): no explicit `__metaclass__ = pmi.Proxy` here, unlike
        # the class below -- presumably inherited via the base class; verify.
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.LennardJones93WallLocal',
            pmicall = ['setParams', 'getParams']
        )

    class SingleParticleLennardJones93Wall(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.SingleParticleLennardJones93WallLocal',
            pmicall = ['setPotential']
        )
| capoe/espressopp.soap | src/interaction/LennardJones93Wall.py | Python | gpl-3.0 | 4,584 | [
"ESPResSo"
] | 9b19ece6202ce1096ac7169f871ea5d0c9577fcce60052994d92da2e4a6b70a8 |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import unittest
import os
import numpy as np
from pymatgen.analysis.elasticity.elastic import *
from pymatgen.analysis.elasticity.strain import Strain, IndependentStrain, Deformation
from pymatgen.analysis.elasticity.stress import Stress
from pymatgen.util.testing import PymatgenTest
from scipy.misc import central_diff_weights
import warnings
import json
import random
from six.moves import zip
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class ElasticTensorTest(PymatgenTest):
    # Tests ElasticTensor: construction from full/Voigt form, derived elastic
    # moduli, structure-dependent properties, and fitting from stress/strain.
    def setUp(self):
        # 6x6 Voigt form and the equivalent full 3x3x3x3 tensor of the same
        # reference elastic tensor; both fixtures are compared below.
        self.voigt_1 = [[59.33, 28.08, 28.08, 0, 0, 0],
                        [28.08, 59.31, 28.07, 0, 0, 0],
                        [28.08, 28.07, 59.32, 0, 0, 0],
                        [0, 0, 0, 26.35, 0, 0],
                        [0, 0, 0, 0, 26.35, 0],
                        [0, 0, 0, 0, 0, 26.35]]
        mat = np.random.randn(6, 6)
        mat = mat + np.transpose(mat)  # symmetrize the random Voigt matrix
        self.rand_elastic_tensor = ElasticTensor.from_voigt(mat)
        self.ft = np.array([[[[59.33, 0, 0],
                              [0, 28.08, 0],
                              [0, 0, 28.08]],
                             [[0, 26.35, 0],
                              [26.35, 0, 0],
                              [0, 0, 0]],
                             [[0, 0, 26.35],
                              [0, 0, 0],
                              [26.35, 0, 0]]],
                            [[[0, 26.35, 0],
                              [26.35, 0, 0],
                              [0, 0, 0]],
                             [[28.08, 0, 0],
                              [0, 59.31, 0],
                              [0, 0, 28.07]],
                             [[0, 0, 0],
                              [0, 0, 26.35],
                              [0, 26.35, 0]]],
                            [[[0, 0, 26.35],
                              [0, 0, 0],
                              [26.35, 0, 0]],
                             [[0, 0, 0],
                              [0, 0, 26.35],
                              [0, 26.35, 0]],
                             [[28.08, 0, 0],
                              [0, 28.07, 0],
                              [0, 0, 59.32]]]])
        self.elastic_tensor_1 = ElasticTensor(self.ft)
        filepath = os.path.join(test_dir, 'Sn_def_stress.json')
        with open(filepath) as f:
            self.def_stress_dict = json.load(f)
        self.structure = self.get_structure("Sn")
        warnings.simplefilter("always")
    def test_properties(self):
        # compliance tensor
        self.assertArrayAlmostEqual(np.linalg.inv(self.elastic_tensor_1.voigt),
                                    self.elastic_tensor_1.compliance_tensor)
        # KG average properties
        self.assertAlmostEqual(38.49111111111, self.elastic_tensor_1.k_voigt)
        self.assertAlmostEqual(22.05866666666, self.elastic_tensor_1.g_voigt)
        self.assertAlmostEqual(38.49110945133, self.elastic_tensor_1.k_reuss)
        self.assertAlmostEqual(20.67146635306, self.elastic_tensor_1.g_reuss)
        self.assertAlmostEqual(38.49111028122, self.elastic_tensor_1.k_vrh)
        self.assertAlmostEqual(21.36506650986, self.elastic_tensor_1.g_vrh)
        # universal anisotropy
        self.assertAlmostEqual(0.33553509658699,
                               self.elastic_tensor_1.universal_anisotropy)
        # homogeneous poisson
        self.assertAlmostEqual(0.26579965576472,
                               self.elastic_tensor_1.homogeneous_poisson)
        # voigt notation tensor
        self.assertArrayAlmostEqual(self.elastic_tensor_1.voigt,
                                    self.voigt_1)
        # young's modulus
        self.assertAlmostEqual(54087787667.160583,
                               self.elastic_tensor_1.y_mod)
        # prop dict
        prop_dict = self.elastic_tensor_1.property_dict
        self.assertAlmostEqual(prop_dict["homogeneous_poisson"], 0.26579965576)
        for k, v in prop_dict.items():
            self.assertAlmostEqual(getattr(self.elastic_tensor_1, k), v)
    def test_structure_based_methods(self):
        # trans_velocity
        self.assertAlmostEqual(1996.35019877,
                               self.elastic_tensor_1.trans_v(self.structure))
        # long_velocity
        self.assertAlmostEqual(3534.68123832,
                               self.elastic_tensor_1.long_v(self.structure))
        # Snyder properties
        self.assertAlmostEqual(18.06127074,
                               self.elastic_tensor_1.snyder_ac(self.structure))
        self.assertAlmostEqual(0.18937465,
                               self.elastic_tensor_1.snyder_opt(self.structure))
        self.assertAlmostEqual(18.25064540,
                               self.elastic_tensor_1.snyder_total(self.structure))
        # Clarke
        self.assertAlmostEqual(0.3450307,
                               self.elastic_tensor_1.clarke_thermalcond(self.structure))
        # Cahill
        self.assertAlmostEqual(0.37896275,
                               self.elastic_tensor_1.cahill_thermalcond(self.structure))
        # Debye
        self.assertAlmostEqual(247.3058931,
                               self.elastic_tensor_1.debye_temperature(self.structure))
        self.assertAlmostEqual(189.05670205,
                               self.elastic_tensor_1.debye_temperature_gibbs(self.structure))
        # structure-property dict
        sprop_dict = self.elastic_tensor_1.get_structure_property_dict(self.structure)
        self.assertAlmostEqual(sprop_dict["long_v"], 3534.68123832)
        for k, v in sprop_dict.items():
            if k=="structure":
                self.assertEqual(v, self.structure)
            else:
                f = getattr(self.elastic_tensor_1, k)
                if callable(f):
                    self.assertAlmostEqual(getattr(self.elastic_tensor_1, k)(self.structure), v)
                else:
                    self.assertAlmostEqual(getattr(self.elastic_tensor_1, k), v)
    def test_new(self):
        self.assertArrayAlmostEqual(self.elastic_tensor_1,
                                    ElasticTensor(self.ft))
        # NOTE(review): `nonsymm` aliases self.ft, so the in-place += mutates
        # the fixture; harmless only because each test re-runs setUp.
        nonsymm = self.ft
        nonsymm[0, 1, 2, 2] += 1.0
        with warnings.catch_warnings(record=True) as w:
            ElasticTensor(nonsymm)
            self.assertEqual(len(w), 1)
        badtensor1 = np.zeros((3, 3, 3))
        badtensor2 = np.zeros((3, 3, 3, 2))
        self.assertRaises(ValueError, ElasticTensor, badtensor1)
        self.assertRaises(ValueError, ElasticTensor, badtensor2)
    def test_from_pseudoinverse(self):
        strain_list = [Strain.from_deformation(def_matrix)
                       for def_matrix in self.def_stress_dict['deformations']]
        stress_list = [stress for stress in self.def_stress_dict['stresses']]
        with warnings.catch_warnings(record=True):
            et_fl = -0.1*ElasticTensor.from_pseudoinverse(strain_list,
                                                          stress_list).voigt
            self.assertArrayAlmostEqual(et_fl.round(2),
                                        [[59.29, 24.36, 22.46, 0, 0, 0],
                                         [28.06, 56.91, 22.46, 0, 0, 0],
                                         [28.06, 25.98, 54.67, 0, 0, 0],
                                         [0, 0, 0, 26.35, 0, 0],
                                         [0, 0, 0, 0, 26.35, 0],
                                         [0, 0, 0, 0, 0, 26.35]])
    def test_from_stress_dict(self):
        stress_dict = dict(list(zip([IndependentStrain(def_matrix) for def_matrix
                                     in self.def_stress_dict['deformations']],
                                    [Stress(stress_matrix) for stress_matrix
                                     in self.def_stress_dict['stresses']])))
        # Keep only two strain magnitudes to exercise the minimal-data path.
        minimal_sd = {k:v for k, v in stress_dict.items()
                      if (abs(k[k.ij] - 0.015) < 1e-10
                          or abs(k[k.ij] - 0.01005) < 1e-10)}
        with warnings.catch_warnings(record = True):
            et_from_sd = ElasticTensor.from_stress_dict(stress_dict)
            et_from_minimal_sd = ElasticTensor.from_stress_dict(minimal_sd)
        self.assertArrayAlmostEqual(et_from_sd.voigt_symmetrized.round(2),
                                    self.elastic_tensor_1)
        self.assertAlmostEqual(50.63394169, et_from_minimal_sd[0,0,0,0])
    def test_energy_density(self):
        film_elac = ElasticTensor.from_voigt([
            [324.32, 187.3, 170.92, 0., 0., 0.],
            [187.3, 324.32, 170.92, 0., 0., 0.],
            [170.92, 170.92, 408.41, 0., 0., 0.],
            [0., 0., 0., 150.73, 0., 0.],
            [0., 0., 0., 0., 150.73, 0.],
            [0., 0., 0., 0., 0., 238.74]])
        dfm = Deformation([[ -9.86004855e-01,2.27539582e-01,-4.64426035e-17],
                           [ -2.47802121e-01,-9.91208483e-01,-7.58675185e-17],
                           [ -6.12323400e-17,-6.12323400e-17,1.00000000e+00]])
        self.assertAlmostEqual(film_elac.energy_density(dfm.green_lagrange_strain),
                               0.000125664672793)
        # smoke test: energy density from a pure-rotation-like deformation
        film_elac.energy_density(Strain.from_deformation([[ 0.99774738, 0.11520994, -0. ],
                                                          [-0.11520994, 0.99774738, 0. ],
                                                          [-0., -0., 1., ]]))
class ElasticTensorExpansionTest(PymatgenTest):
    # Tests for ElasticTensorExpansion: a series of 2nd- and 3rd-order
    # elastic constant tensors fit and evaluated together.
    def setUp(self):
        # Reference third-order elastic constant (TOEC) data.
        with open(os.path.join(test_dir, 'test_toec_data.json')) as f:
            self.data_dict = json.load(f)
        self.strains = [Strain(sm) for sm in self.data_dict['strains']]
        self.pk_stresses = [Stress(d) for d in self.data_dict['pk_stresses']]
        self.c2 = self.data_dict["C2_raw"]
        self.c3 = self.data_dict["C3_raw"]
        self.exp = ElasticTensorExpansion.from_voigt([self.c2, self.c3])
    def test_init(self):
        cijkl = Tensor.from_voigt(self.c2)
        cijklmn = Tensor.from_voigt(self.c3)
        exp = ElasticTensorExpansion([cijkl, cijklmn])
        from_voigt = ElasticTensorExpansion.from_voigt([self.c2, self.c3])
        self.assertEqual(exp.order, 3)
    def test_from_diff_fit(self):
        # smoke test: fitting from strain/stress data should not raise
        exp = ElasticTensorExpansion.from_diff_fit(self.strains, self.pk_stresses)
    def test_calculate_stress(self):
        calc_stress = self.exp.calculate_stress(self.strains[0])
        self.assertArrayAlmostEqual(self.pk_stresses[0], calc_stress, decimal=2)
    def test_energy_density(self):
        # smoke test: energy density evaluation should not raise
        self.exp.energy_density(self.strains[0])
class NthOrderElasticTensorTest(PymatgenTest):
    # Tests for NthOrderElasticTensor (elastic constants of arbitrary order).
    def setUp(self):
        with open(os.path.join(test_dir, 'test_toec_data.json')) as f:
            self.data_dict = json.load(f)
        self.strains = [Strain(sm) for sm in self.data_dict['strains']]
        self.pk_stresses = [Stress(d) for d in self.data_dict['pk_stresses']]
        self.c2 = NthOrderElasticTensor.from_voigt(self.data_dict["C2_raw"])
        self.c3 = NthOrderElasticTensor.from_voigt(self.data_dict["C3_raw"])
    def test_init(self):
        c2 = NthOrderElasticTensor(self.c2.tolist())
        c3 = NthOrderElasticTensor(self.c3.tolist())
        c4 = NthOrderElasticTensor(np.zeros([3]*8))
        # rank-4, -6, -8 tensors correspond to orders 2, 3, 4
        for n, c in enumerate([c2, c3, c4]):
            self.assertEqual(c.order, n+2)
        # odd rank is invalid
        self.assertRaises(ValueError, NthOrderElasticTensor, np.zeros([3]*5))
    def test_from_diff_fit(self):
        c3 = NthOrderElasticTensor.from_diff_fit(self.strains, self.pk_stresses,
                                                 eq_stress = self.data_dict["eq_stress"],
                                                 order=3)
        self.assertArrayAlmostEqual(c3.voigt, self.data_dict["C3_raw"], decimal=2)
    def test_calculate_stress(self):
        calc_stress = self.c2.calculate_stress(self.strains[0])
        self.assertArrayAlmostEqual(self.pk_stresses[0], calc_stress, decimal=0)
        # Test calculation from voigt strain
        calc_stress_voigt = self.c2.calculate_stress(self.strains[0].voigt)
    def test_energy_density(self):
        # smoke test: should evaluate without raising
        self.c3.energy_density(self.strains[0])
class DiffFitTest(PymatgenTest):
    """
    Tests various functions related to diff fitting
    """
    def setUp(self):
        with open(os.path.join(test_dir, 'test_toec_data.json')) as f:
            self.data_dict = json.load(f)
        self.strains = [Strain(sm) for sm in self.data_dict['strains']]
        self.pk_stresses = [Stress(d) for d in self.data_dict['pk_stresses']]
    def test_get_strain_state_dict(self):
        # Build synthetic strain/stress data with known strain states and
        # check that get_strain_state_dict recovers the grouping.
        strain_inds = [(0,), (1,), (2,), (1, 3), (1, 2, 3)]
        vecs = {}
        strain_states = []
        for strain_ind in strain_inds:
            ss = np.zeros(6)
            np.put(ss, strain_ind, 1)
            strain_states.append(tuple(ss))
            vec = np.zeros((4, 6))
            rand_values = np.random.uniform(0.1, 1, 4)
            for i in strain_ind:
                vec[:, i] = rand_values
            vecs[strain_ind] = vec
        all_strains = [Strain.from_voigt(v).zeroed() for vec in vecs.values()
                       for v in vec]
        random.shuffle(all_strains)
        all_stresses = [Stress.from_voigt(np.random.random(6)).zeroed()
                        for s in all_strains]
        strain_dict = {k.tostring():v for k,v in zip(all_strains, all_stresses)}
        ss_dict = get_strain_state_dict(all_strains, all_stresses, add_eq=False)
        # Check length of ss_dict
        self.assertEqual(len(strain_inds), len(ss_dict))
        # Check sets of strain states are correct
        self.assertEqual(set(strain_states), set(ss_dict.keys()))
        for strain_state, data in ss_dict.items():
            # Check correspondence of strains/stresses
            for strain, stress in zip(data["strains"], data["stresses"]):
                self.assertArrayAlmostEqual(Stress.from_voigt(stress),
                                            strain_dict[Strain.from_voigt(strain).tostring()])
    def test_find_eq_stress(self):
        # NOTE(review): random_stresses is built with Strain.from_voigt —
        # presumably Stress was intended; find_eq_stress only indexes, so the
        # test still passes.  Confirm before changing.
        random_strains = [Strain.from_voigt(s) for s in np.random.uniform(0.1, 1, (20, 6))]
        random_stresses = [Strain.from_voigt(s) for s in np.random.uniform(0.1, 1, (20, 6))]
        with warnings.catch_warnings(record=True):
            # No zero strain present: falls back to zeros with a warning
            no_eq = find_eq_stress(random_strains, random_stresses)
            self.assertArrayAlmostEqual(no_eq, np.zeros((3,3)))
        random_strains[12] = Strain.from_voigt(np.zeros(6))
        eq_stress = find_eq_stress(random_strains, random_stresses)
        self.assertArrayAlmostEqual(random_stresses[12], eq_stress)
    def test_get_diff_coeff(self):
        forward_11 = get_diff_coeff([0, 1], 1)
        forward_13 = get_diff_coeff([0, 1, 2, 3], 1)
        backward_26 = get_diff_coeff(np.arange(-6, 1), 2)
        central_29 = get_diff_coeff(np.arange(-4, 5), 2)
        self.assertArrayAlmostEqual(forward_11, [-1, 1])
        self.assertArrayAlmostEqual(forward_13, [-11./6, 3, -3./2, 1./3])
        self.assertArrayAlmostEqual(backward_26, [137./180, -27./5,33./2,-254./9,
                                                  117./4,-87./5,203./45])
        self.assertArrayAlmostEqual(central_29, central_diff_weights(9, 2))
    def test_generate_pseudo(self):
        # Smoke test.  The unused second return value is bound to "_" —
        # the original bound it to "abs", shadowing the builtin.
        strain_states = np.eye(6).tolist()
        m2, _ = generate_pseudo(strain_states, order=2)
        m3, _ = generate_pseudo(strain_states, order=3)
    def test_fit(self):
        cdf = diff_fit(self.strains, self.pk_stresses,
                       self.data_dict["eq_stress"])
        reduced = [(e, pk) for e, pk in zip(self.strains, self.pk_stresses)
                   if not (abs(abs(e)-0.05)<1e-10).any()]
        # Get reduced dataset
        r_strains, r_pk_stresses = zip(*reduced)
        with warnings.catch_warnings(record=True):
            c2 = diff_fit(r_strains, r_pk_stresses,
                          self.data_dict["eq_stress"], order=2)
            c2, c3, c4 = diff_fit(r_strains, r_pk_stresses,
                                  self.data_dict["eq_stress"],
                                  order=4)
            c2, c3 = diff_fit(self.strains, self.pk_stresses,
                              self.data_dict["eq_stress"], order=3)
            c2_red, c3_red = diff_fit(r_strains, r_pk_stresses,
                                      self.data_dict["eq_stress"],
                                      order=3)
            self.assertArrayAlmostEqual(c2.voigt, self.data_dict["C2_raw"])
            self.assertArrayAlmostEqual(c3.voigt, self.data_dict["C3_raw"], decimal=5)
            self.assertArrayAlmostEqual(c2, c2_red, decimal=0)
            self.assertArrayAlmostEqual(c3, c3_red, decimal=-1)
if __name__ == '__main__':
    # Run the full test suite when executed directly.
    unittest.main()
| xhqu1981/pymatgen | pymatgen/analysis/elasticity/tests/test_elastic.py | Python | mit | 17,054 | [
"pymatgen"
] | a9a8bda8b5a11592442403044904ad1652f26c1e44e176195b1078256f4d1b8e |
# Copyright 2014, Brian Coca <bcoca@ansible.com>
# Copyright 2017, Ken Celenza <ken@networktocode.com>
# Copyright 2017, Jason Edelman <jason@networktocode.com>
# Copyright 2017, Ansible Project
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import itertools
import math
from jinja2.filters import environmentfilter
from ansible.errors import AnsibleFilterError
from ansible.module_utils.common.text import formatters
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils.six.moves import zip, zip_longest
from ansible.module_utils.common._collections_compat import Hashable, Mapping, Iterable
from ansible.module_utils._text import to_native, to_text
from ansible.utils.display import Display
try:
from jinja2.filters import do_unique
HAS_UNIQUE = True
except ImportError:
HAS_UNIQUE = False
display = Display()
@environmentfilter
def unique(environment, a, case_sensitive=False, attribute=None):
    # Prefer Jinja2's own `unique` filter (it supports the case_sensitive and
    # attribute parameters); fall back to Ansible's simpler implementation
    # when Jinja2's is unavailable or fails in a way the fallback can handle.
    def _do_fail(e):
        # Re-raise when the caller used parameters the fallback cannot honor.
        if case_sensitive or attribute:
            raise AnsibleFilterError("Jinja2's unique filter failed and we cannot fall back to Ansible's version "
                                     "as it does not support the parameters supplied", orig_exc=e)
    error = e = None
    try:
        if HAS_UNIQUE:
            c = do_unique(environment, a, case_sensitive=case_sensitive, attribute=attribute)
            # preserve the original container flavor (set vs list)
            if isinstance(a, Hashable):
                c = set(c)
            else:
                c = list(c)
    except TypeError as e:
        error = e
        _do_fail(e)
    except Exception as e:
        error = e
        _do_fail(e)
        display.warning('Falling back to Ansible unique filter as Jinja2 one failed: %s' % to_text(e))
    if not HAS_UNIQUE or error:
        # handle Jinja2 specific attributes when using Ansible's version
        if case_sensitive or attribute:
            raise AnsibleFilterError("Ansible's unique filter does not support case_sensitive nor attribute parameters, "
                                     "you need a newer version of Jinja2 that provides their version of the filter.")
        if isinstance(a, Hashable):
            c = set(a)
        else:
            # order-preserving de-duplication for non-hashable inputs
            c = []
            for x in a:
                if x not in c:
                    c.append(x)
    return c
@environmentfilter
def intersect(environment, a, b):
    """Return elements common to *a* and *b*.

    Hashable inputs use fast set intersection; otherwise the order of *a*
    is kept and duplicates are removed via the ``unique`` filter.
    """
    if isinstance(a, Hashable) and isinstance(b, Hashable):
        return set(a) & set(b)
    return unique(environment, [item for item in a if item in b])
@environmentfilter
def difference(environment, a, b):
    """Return elements of *a* that do not appear in *b* (set difference)."""
    if isinstance(a, Hashable) and isinstance(b, Hashable):
        return set(a) - set(b)
    return unique(environment, [item for item in a if item not in b])
@environmentfilter
def symmetric_difference(environment, a, b):
    """Return elements present in exactly one of *a* and *b*."""
    if isinstance(a, Hashable) and isinstance(b, Hashable):
        return set(a) ^ set(b)
    common = intersect(environment, a, b)
    return [item for item in union(environment, a, b) if item not in common]
@environmentfilter
def union(environment, a, b):
    """Return the union of *a* and *b* (duplicates removed)."""
    if isinstance(a, Hashable) and isinstance(b, Hashable):
        return set(a) | set(b)
    return unique(environment, a + b)
def min(a):
    """Return the smallest item of *a* (Jinja2 ``min`` filter).

    The builtin is looked up through the ``builtins`` module rather than
    ``__builtins__``: the latter is a dict in imported modules but a *module*
    in ``__main__``, where ``__builtins__.get(...)`` raises AttributeError.
    """
    import builtins
    return builtins.min(a)
def max(a):
    """Return the largest item of *a* (Jinja2 ``max`` filter).

    Uses the ``builtins`` module instead of ``__builtins__`` — the latter is
    only a dict in imported modules (it is a module in ``__main__``), so
    ``__builtins__.get`` is not portable.
    """
    import builtins
    return builtins.max(a)
def logarithm(x, base=math.e):
    """Return the logarithm of *x* in *base* (natural log by default).

    Raises AnsibleFilterError when *x* or *base* is not a number.
    """
    try:
        # math.log10 is kept for its better precision in the common base-10 case
        return math.log10(x) if base == 10 else math.log(x, base)
    except TypeError as e:
        raise AnsibleFilterError('log() can only be used on numbers: %s' % to_native(e))
def power(x, y):
    """Return *x* raised to the power *y*, as a float.

    Raises AnsibleFilterError when either argument is not a number.
    """
    try:
        result = math.pow(x, y)
    except TypeError as e:
        raise AnsibleFilterError('pow() can only be used on numbers: %s' % to_native(e))
    return result
def inversepower(x, base=2):
    """Return the *base*-th root of *x* (square root by default).

    Raises AnsibleFilterError for non-numeric input or a domain error
    (e.g. the square root of a negative number).
    """
    try:
        if base == 2:
            return math.sqrt(x)
        return math.pow(x, 1.0 / float(base))
    except (ValueError, TypeError) as e:
        raise AnsibleFilterError('root() can only be used on numbers: %s' % to_native(e))
def human_readable(size, isbits=False, unit=None):
    """Return a human readable string for a byte/bit count.

    Thin wrapper around ``formatters.bytes_to_human``; any failure is
    re-raised as AnsibleFilterError with the original exception attached
    (``orig_exc``, as done elsewhere in this module) so the cause is not lost.
    """
    try:
        return formatters.bytes_to_human(size, isbits, unit)
    except Exception as e:
        raise AnsibleFilterError("human_readable() can't interpret following string: %s" % size,
                                 orig_exc=e)
def human_to_bytes(size, default_unit=None, isbits=False):
    """Return the byte count parsed from a human readable string.

    Thin wrapper around ``formatters.human_to_bytes``; failures are re-raised
    as AnsibleFilterError with the original exception attached (``orig_exc``)
    so the underlying parse error remains diagnosable.
    """
    try:
        return formatters.human_to_bytes(size, default_unit, isbits)
    except Exception as e:
        raise AnsibleFilterError("human_to_bytes() can't interpret following string: %s" % size,
                                 orig_exc=e)
def rekey_on_member(data, key, duplicates='error'):
    """
    Rekey a dict of dicts on another member

    May also create a dict from a list of dicts.

    duplicates can be one of ``error`` or ``overwrite`` to specify whether to error out if the key
    value would be duplicated or to overwrite previous entries if that's the case.
    """
    if duplicates not in ('error', 'overwrite'):
        raise AnsibleFilterError("duplicates parameter to rekey_on_member has unknown value: {0}".format(duplicates))

    new_obj = {}

    # Accept a mapping (iterate its values) or any non-string iterable.
    if isinstance(data, Mapping):
        iterate_over = data.values()
    elif isinstance(data, Iterable) and not isinstance(data, (text_type, binary_type)):
        iterate_over = data
    else:
        raise AnsibleFilterError("Type is not a valid list, set, or dict")

    for item in iterate_over:
        if not isinstance(item, Mapping):
            raise AnsibleFilterError("List item is not a valid dict")

        try:
            key_elem = item[key]
        except KeyError:
            raise AnsibleFilterError("Key {0} was not found".format(key))
        except Exception as e:
            raise AnsibleFilterError(to_native(e))

        # Explicit membership test instead of truthiness of .get(): clearer,
        # and correct even if an entry's value could ever be falsy.
        if key_elem in new_obj:
            if duplicates == 'error':
                raise AnsibleFilterError("Key {0} is not unique, cannot correctly turn into dict".format(key_elem))
            elif duplicates == 'overwrite':
                new_obj[key_elem] = item
        else:
            new_obj[key_elem] = item

    return new_obj
class FilterModule(object):
    ''' Ansible math jinja2 filters '''

    def filters(self):
        # Map Jinja2 filter names to their implementations; the plugin loader
        # registers each entry as a template filter.
        filters = {
            # general math
            'min': min,
            'max': max,

            # exponents and logarithms
            'log': logarithm,
            'pow': power,
            'root': inversepower,

            # set theory
            'unique': unique,
            'intersect': intersect,
            'difference': difference,
            'symmetric_difference': symmetric_difference,
            'union': union,

            # combinatorial
            'product': itertools.product,
            'permutations': itertools.permutations,
            'combinations': itertools.combinations,

            # computer theory
            'human_readable': human_readable,
            'human_to_bytes': human_to_bytes,
            'rekey_on_member': rekey_on_member,

            # zip
            'zip': zip,
            'zip_longest': zip_longest,

        }
        return filters
| azaghal/ansible | lib/ansible/plugins/filter/mathstuff.py | Python | gpl-3.0 | 8,097 | [
"Brian"
] | 1cd0ef68bd0a649f7ac10b3ce48a91eec67e7e506090efc4b8f882fdeac53c08 |
"""
fitpack --- curve and surface fitting with splines
fitpack is based on a collection of Fortran routines DIERCKX
by P. Dierckx (see http://www.netlib.org/dierckx/) transformed
to double routines by Pearu Peterson.
"""
# Created by Pearu Peterson, June,August 2003
from __future__ import division, print_function, absolute_import
__all__ = [
'UnivariateSpline',
'InterpolatedUnivariateSpline',
'LSQUnivariateSpline',
'BivariateSpline',
'LSQBivariateSpline',
'SmoothBivariateSpline',
'LSQSphereBivariateSpline',
'SmoothSphereBivariateSpline',
'RectBivariateSpline',
'RectSphereBivariateSpline']
import warnings
from numpy import zeros, concatenate, alltrue, ravel, all, diff, array, ones
import numpy as np
from . import fitpack
from . import dfitpack
################ Univariate spline ####################
_curfit_messages = {1:"""
The required storage space exceeds the available storage space, as
specified by the parameter nest: nest too small. If nest is already
large (say nest > m/2), it may also indicate that s is too small.
The approximation returned is the weighted least-squares spline
according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp
gives the corresponding weighted sum of squared residuals (fp>s).
""",
2:"""
A theoretically impossible result was found during the iteration
proces for finding a smoothing spline with fp = s: s too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
3:"""
The maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached: s
too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[0]<x[1]<...<x[m-1]<=xe, w[i]>0, i=0..m-1
if iopt=-1:
xb<t[k+1]<t[k+2]<...<t[n-k-2]<xe"""
}
class UnivariateSpline(object):
    """
    One-dimensional smoothing spline fit to a given set of data points.
    Fits a spline y=s(x) of degree `k` to the provided `x`, `y` data.  `s`
    specifies the number of knots by specifying a smoothing condition.
    Parameters
    ----------
    x : (N,) array_like
        1-D array of independent input data. Must be increasing.
    y : (N,) array_like
        1-D array of dependent input data, of the same length as `x`.
    w : (N,) array_like, optional
        Weights for spline fitting.  Must be positive.  If None (default),
        weights are all equal.
    bbox : (2,) array_like, optional
        2-sequence specifying the boundary of the approximation interval. If
        None (default), ``bbox=[x[0], x[-1]]``.
    k : int, optional
        Degree of the smoothing spline.  Must be <= 5.
    s : float or None, optional
        Positive smoothing factor used to choose the number of knots.  Number
        of knots will be increased until the smoothing condition is satisfied:
        sum((w[i]*(y[i]-s(x[i])))**2,axis=0) <= s
        If None (default), s=len(w) which should be a good value if 1/w[i] is
        an estimate of the standard deviation of y[i].  If 0, spline will
        interpolate through all data points.
    See Also
    --------
    InterpolatedUnivariateSpline : Subclass with smoothing forced to 0
    LSQUnivariateSpline : Subclass in which knots are user-selected instead of
        being set by smoothing condition
    splrep : An older, non object-oriented wrapping of FITPACK
    splev, sproot, splint, spalde
    BivariateSpline : A similar class for two-dimensional spline interpolation
    Notes
    -----
    The number of data points must be larger than the spline degree `k`.
    Examples
    --------
    >>> from numpy import linspace,exp
    >>> from numpy.random import randn
    >>> import matplotlib.pyplot as plt
    >>> from scipy.interpolate import UnivariateSpline
    >>> x = linspace(-3, 3, 100)
    >>> y = exp(-x**2) + randn(100)/10
    >>> s = UnivariateSpline(x, y, s=1)
    >>> xs = linspace(-3, 3, 1000)
    >>> ys = s(xs)
    >>> plt.plot(x, y, '.-')
    >>> plt.plot(xs, ys)
    >>> plt.show()
    xs,ys is now a smoothed, super-sampled version of the noisy gaussian x,y.
    """
    # NOTE(review): the mutable default `bbox=[None]*2` is safe here — bbox is
    # only indexed, never mutated.
    def __init__(self, x, y, w=None, bbox = [None]*2, k=3, s=None):
        """
        Input:
          x,y   - 1-d sequences of data points (x must be
                  in strictly ascending order)
        Optional input:
          w          - positive 1-d sequence of weights
          bbox       - 2-sequence specifying the boundary of
                       the approximation interval.
                       By default, bbox=[x[0],x[-1]]
          k=3        - degree of the univariate spline.
          s          - positive smoothing factor defined for
                       estimation condition:
                         sum((w[i]*(y[i]-s(x[i])))**2,axis=0) <= s
                       Default s=len(w) which should be a good value
                       if 1/w[i] is an estimate of the standard
                       deviation of y[i].
        """
        #_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
        data = dfitpack.fpcurf0(x,y,k,w=w,
                                xb=bbox[0],xe=bbox[1],s=s)
        if data[-1]==1:
            # nest too small, setting to maximum bound
            data = self._reset_nest(data)
        self._data = data
        self._reset_class()

    def _reset_class(self):
        # Cache the evaluation triple (t, c, k) and, based on FITPACK's exit
        # code, re-class the instance to the matching subclass.
        data = self._data
        n,t,c,k,ier = data[7],data[8],data[9],data[5],data[-1]
        self._eval_args = t[:n],c[:n],k
        if ier==0:
            # the spline returned has a residual sum of squares fp
            # such that abs(fp-s)/s <= tol with tol a relative
            # tolerance set to 0.001 by the program
            pass
        elif ier==-1:
            # the spline returned is an interpolating spline
            self._set_class(InterpolatedUnivariateSpline)
        elif ier==-2:
            # the spline returned is the weighted least-squares
            # polynomial of degree k. In this extreme case fp gives
            # the upper bound fp0 for the smoothing factor s.
            self._set_class(LSQUnivariateSpline)
        else:
            # error
            if ier==1:
                self._set_class(LSQUnivariateSpline)
            message = _curfit_messages.get(ier,'ier=%s' % (ier))
            warnings.warn(message)

    def _set_class(self, cls):
        self._spline_class = cls
        if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline,
                              LSQUnivariateSpline):
            self.__class__ = cls
        else:
            # It's an unknown subclass -- don't change class. cf. #731
            pass

    def _reset_nest(self, data, nest=None):
        # Re-run the fit with a larger knot workspace (`nest`), defaulting to
        # the theoretical maximum m+k+1.
        n = data[10]
        if nest is None:
            k,m = data[5],len(data[0])
            nest = m+k+1  # this is the maximum bound for nest
        else:
            if not n <= nest:
                raise ValueError("`nest` can only be increased")
        t, c, fpint, nrdata = [np.resize(data[n], nest) for n in [8,9,11,12]]
        args = data[:8] + (t,c,n,fpint,nrdata,data[13])
        data = dfitpack.fpcurf1(*args)
        return data

    def set_smoothing_factor(self, s):
        """ Continue spline computation with the given smoothing
        factor s and with the knots found at the last call.
        """
        data = self._data
        if data[6]==-1:
            # Fixed: the two string fragments previously concatenated to
            # "...unchanged forLSQ spline..." (missing separating space).
            warnings.warn('smoothing factor unchanged for '
                          'LSQ spline with fixed knots')
            return
        args = data[:6] + (s,) + data[7:]
        data = dfitpack.fpcurf1(*args)
        if data[-1]==1:
            # nest too small, setting to maximum bound
            data = self._reset_nest(data)
        self._data = data
        self._reset_class()

    def __call__(self, x, nu=0):
        """ Evaluate spline (or its nu-th derivative) at positions x.
        Note: x can be unordered but the evaluation is more efficient
        if x is (partially) ordered.
        """
        x = np.asarray(x)
        # empty input yields empty output
        if x.size == 0:
            return array([])
#        if nu is None:
#            return dfitpack.splev(*(self._eval_args+(x,)))
#        return dfitpack.splder(nu=nu,*(self._eval_args+(x,)))
        return fitpack.splev(x, self._eval_args, der=nu)

    def get_knots(self):
        """ Return positions of (boundary and interior) knots of the spline.
        """
        data = self._data
        k,n = data[5],data[7]
        return data[8][k:n-k]

    def get_coeffs(self):
        """Return spline coefficients."""
        data = self._data
        k,n = data[5],data[7]
        return data[9][:n-k-1]

    def get_residual(self):
        """Return weighted sum of squared residuals of the spline
        approximation: ``sum((w[i] * (y[i]-s(x[i])))**2, axis=0)``.
        """
        return self._data[10]

    def integral(self, a, b):
        """ Return definite integral of the spline between two given points.
        """
        return dfitpack.splint(*(self._eval_args+(a,b)))

    def derivatives(self, x):
        """ Return all derivatives of the spline at the point x."""
        d,ier = dfitpack.spalde(*(self._eval_args+(x,)))
        if not ier == 0:
            raise ValueError("Error code returned by spalde: %s" % ier)
        return d

    def roots(self):
        """ Return the zeros of the spline.
        Restriction: only cubic splines are supported by fitpack.
        """
        k = self._data[5]
        if k==3:
            z,m,ier = dfitpack.sproot(*self._eval_args[:2])
            if not ier == 0:
                # Fixed: this failure comes from sproot, not spalde.
                raise ValueError("Error code returned by sproot: %s" % ier)
            return z[:m]
        raise NotImplementedError('finding roots unsupported for '
                                  'non-cubic splines')
class InterpolatedUnivariateSpline(UnivariateSpline):
    """
    One-dimensional interpolating spline for a given set of data points.
    Fits a spline y=s(x) of degree `k` to the provided `x`, `y` data. Spline
    function passes through all provided points. Equivalent to
    `UnivariateSpline` with s=0.
    Parameters
    ----------
    x : (N,) array_like
        Input dimension of data points -- must be increasing
    y : (N,) array_like
        input dimension of data points
    w : (N,) array_like, optional
        Weights for spline fitting.  Must be positive.  If None (default),
        weights are all equal.
    bbox : (2,) array_like, optional
        2-sequence specifying the boundary of the approximation interval. If
        None (default), bbox=[x[0],x[-1]].
    k : int, optional
        Degree of the smoothing spline.  Must be 1 <= `k` <= 5.
    See Also
    --------
    UnivariateSpline : Superclass -- allows knots to be selected by a
        smoothing condition
    LSQUnivariateSpline : spline for which knots are user-selected
    splrep : An older, non object-oriented wrapping of FITPACK
    splev, sproot, splint, spalde
    BivariateSpline : A similar class for two-dimensional spline interpolation
    Notes
    -----
    The number of data points must be larger than the spline degree `k`.
    Examples
    --------
    >>> from numpy import linspace,exp
    >>> from numpy.random import randn
    >>> from scipy.interpolate import InterpolatedUnivariateSpline
    >>> import matplotlib.pyplot as plt
    >>> x = linspace(-3, 3, 100)
    >>> y = exp(-x**2) + randn(100)/10
    >>> s = InterpolatedUnivariateSpline(x, y)
    >>> xs = linspace(-3, 3, 1000)
    >>> ys = s(xs)
    >>> plt.plot(x, y, '.-')
    >>> plt.plot(xs, ys)
    >>> plt.show()
    xs,ys is now a smoothed, super-sampled version of the noisy gaussian x,y
    """
    def __init__(self, x, y, w=None, bbox = [None]*2, k=3):
        """
        Input:
          x,y   - 1-d sequences of data points (x must be
                  in strictly ascending order)
        Optional input:
          w          - positive 1-d sequence of weights
          bbox       - 2-sequence specifying the boundary of
                       the approximation interval.
                       By default, bbox=[x[0],x[-1]]
          k=3        - degree of the univariate spline.
        """
        #_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
        # s=0 forces interpolation through every data point.
        self._data = dfitpack.fpcurf0(x,y,k,w=w,
                                      xb=bbox[0],xe=bbox[1],s=0)
        self._reset_class()
class LSQUnivariateSpline(UnivariateSpline):
    """
    One-dimensional spline with explicit internal knots.
    Fits a spline y=s(x) of degree `k` to the provided `x`, `y` data.  `t`
    specifies the internal knots of the spline
    Parameters
    ----------
    x : (N,) array_like
        Input dimension of data points -- must be increasing
    y : (N,) array_like
        Input dimension of data points
    t : (M,) array_like
        interior knots of the spline.  Must be in ascending order
        and bbox[0]<t[0]<...<t[-1]<bbox[-1]
    w : (N,) array_like, optional
        weights for spline fitting.  Must be positive.  If None (default),
        weights are all equal.
    bbox : (2,) array_like, optional
        2-sequence specifying the boundary of the approximation interval. If
        None (default), bbox=[x[0],x[-1]].
    k : int, optional
        Degree of the smoothing spline.  Must be 1 <= `k` <= 5.
    Raises
    ------
    ValueError
        If the interior knots do not satisfy the Schoenberg-Whitney conditions
    See Also
    --------
    UnivariateSpline : Superclass -- knots are specified by setting a
        smoothing condition
    InterpolatedUnivariateSpline : spline passing through all points
    splrep : An older, non object-oriented wrapping of FITPACK
    splev, sproot, splint, spalde
    BivariateSpline : A similar class for two-dimensional spline interpolation
    Notes
    -----
    The number of data points must be larger than the spline degree `k`.
    Examples
    --------
    >>> from numpy import linspace,exp
    >>> from numpy.random import randn
    >>> from scipy.interpolate import LSQUnivariateSpline
    >>> import matplotlib.pyplot as plt
    >>> x = linspace(-3,3,100)
    >>> y = exp(-x**2) + randn(100)/10
    >>> t = [-1,0,1]
    >>> s = LSQUnivariateSpline(x,y,t)
    >>> xs = linspace(-3,3,1000)
    >>> ys = s(xs)
    >>> plt.plot(x, y, '.-')
    >>> plt.plot(xs, ys)
    >>> plt.show()
    xs,ys is now a smoothed, super-sampled version of the noisy gaussian x,y
    with knots [-3,-1,0,1,3]
    """
    def __init__(self, x, y, t, w=None, bbox = [None]*2, k=3):
        """
        Input:
          x,y   - 1-d sequences of data points (x must be
                  in strictly ascending order)
          t     - 1-d sequence of the positions of user-defined
                  interior knots of the spline (t must be in strictly
                  ascending order and bbox[0]<t[0]<...<t[-1]<bbox[-1])
        Optional input:
          w          - positive 1-d sequence of weights
          bbox       - 2-sequence specifying the boundary of
                       the approximation interval.
                       By default, bbox=[x[0],x[-1]]
          k=3        - degree of the univariate spline.
        """
        #_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
        xb=bbox[0]
        xe=bbox[1]
        if xb is None: xb = x[0]
        if xe is None: xe = x[-1]
        # Pad the interior knots with k+1 copies of each boundary, as FITPACK
        # expects a full (clamped) knot vector.
        t = concatenate(([xb]*(k+1),t,[xe]*(k+1)))
        n = len(t)
        # Knots must be strictly increasing in the interior region.
        if not alltrue(t[k+1:n-k]-t[k:n-k-1] > 0,axis=0):
            raise ValueError('Interior knots t must satisfy '
                             'Schoenberg-Whitney conditions')
        data = dfitpack.fpcurfm1(x,y,k,t,w=w,xb=xb,xe=xe)
        # Slots 11-12 (fpint, nrdata) are meaningless for fixed knots.
        self._data = data[:-3] + (None,None,data[-1])
        self._reset_class()
################ Bivariate spline ####################
class _BivariateSplineBase(object):
    """Base class for bivariate spline s(x,y) interpolation on the rectangle
    [xb,xe] x [yb, ye] calculated from a given set of data points
    (x,y,z).

    Subclasses are expected to populate ``self.fp`` (the residual) and
    ``self.tck`` (knots in x, knots in y, coefficients).

    See Also
    --------
    bisplrep, bisplev : an older wrapping of FITPACK
    BivariateSpline :
        implementation of bivariate spline interpolation on a plane grid
    SphereBivariateSpline :
        implementation of bivariate spline interpolation on a spherical grid
    """

    def get_residual(self):
        """Return the weighted sum of squared residuals of the spline
        approximation, i.e. sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0).
        """
        residual = self.fp
        return residual

    def get_knots(self):
        """Return a tuple (tx,ty) with the knot positions along the x- and
        y-directions.  Interior knots are t[k+1:-k-1]; the additional
        boundary knots are t[:k+1]=b and t[-k-1:]=e.
        """
        knots = self.tck[:2]
        return knots

    def get_coeffs(self):
        """Return the spline coefficient array."""
        coeffs = self.tck[2]
        return coeffs
_surfit_messages = {1:"""
The required storage space exceeds the available storage space: nxest
or nyest too small, or s too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small or
badly chosen eps.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
3:"""
the maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached:
s too small.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
4:"""
No more knots can be added because the number of b-spline coefficients
(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:
either s or m too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
5:"""
No more knots can be added because the additional knot would (quasi)
coincide with an old one: s too small or too large a weight to an
inaccurate data point.
The weighted least-squares spline corresponds to the current set of
knots.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1
If iopt==-1, then
xb<tx[kx+1]<tx[kx+2]<...<tx[nx-kx-2]<xe
yb<ty[ky+1]<ty[ky+2]<...<ty[ny-ky-2]<ye""",
-3:"""
The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank deficient
system (deficiency=%i). If deficiency is large, the results may be
inaccurate. Deficiency may strongly depend on the value of eps."""
}
class BivariateSpline(_BivariateSplineBase):
    """
    Base class for bivariate splines.

    This describes a spline ``s(x, y)`` of degrees ``kx`` and ``ky`` on
    the rectangle ``[xb, xe] * [yb, ye]`` calculated from a given set
    of data points ``(x, y, z)``.

    To construct these splines, call either `SmoothBivariateSpline` or
    `LSQBivariateSpline`.

    See Also
    --------
    UnivariateSpline : a similar class for univariate spline interpolation
    SmoothBivariateSpline :
        to create a BivariateSpline through the given points
    LSQBivariateSpline :
        to create a BivariateSpline using weighted least-squares fitting
    SphereBivariateSpline :
        bivariate spline interpolation in spherical coordinates
    bisplrep : older wrapping of FITPACK
    bisplev : older wrapping of FITPACK
    """

    def __call__(self, x, y, mth='array'):
        """ Evaluate spline at the grid points defined by the coordinate arrays
        x,y.

        Returns the 2-D array of values on the tensor-product grid formed
        by `x` and `y`.  Only ``mth='array'`` is implemented.
        """
        x = np.asarray(x)
        y = np.asarray(y)
        # empty input yields empty output
        if (x.size == 0) and (y.size == 0):
            return array([])
        if mth=='array':
            tx,ty,c = self.tck[:3]
            kx,ky = self.degrees
            # Grid evaluation via FITPACK's bispev.
            z,ier = dfitpack.bispev(tx,ty,c,kx,ky,x,y)
            if not ier == 0:
                raise ValueError("Error code returned by bispev: %s" % ier)
            return z
        raise NotImplementedError('unknown method mth=%s' % mth)

    def ev(self, xi, yi):
        """
        Evaluate spline at points (x[i], y[i]), i=0,...,len(x)-1

        Unlike ``__call__``, this evaluates pointwise rather than on the
        full grid.
        """
        tx,ty,c = self.tck[:3]
        kx,ky = self.degrees
        # Pointwise evaluation via FITPACK's bispeu.
        zi,ier = dfitpack.bispeu(tx,ty,c,kx,ky,xi,yi)
        if not ier == 0:
            raise ValueError("Error code returned by bispeu: %s" % ier)
        return zi

    def integral(self, xa, xb, ya, yb):
        """
        Evaluate the integral of the spline over area [xa,xb] x [ya,yb].

        Parameters
        ----------
        xa, xb : float
            The end-points of the x integration interval.
        ya, yb : float
            The end-points of the y integration interval.

        Returns
        -------
        integ : float
            The value of the resulting integral.
        """
        tx,ty,c = self.tck[:3]
        kx,ky = self.degrees
        # dblint computes the exact double integral of the piecewise
        # polynomial representation.
        return dfitpack.dblint(tx,ty,c,kx,ky,xa,xb,ya,yb)
class SmoothBivariateSpline(BivariateSpline):
    """
    Smooth bivariate spline approximation.

    Parameters
    ----------
    x, y, z : array_like
        1-D sequences of data points (order is not important).
    w : array_like, optional
        Positive 1-D sequence of weights, of same length as `x`, `y` and `z`.
    bbox : array_like, optional
        Sequence of length 4 specifying the boundary of the rectangular
        approximation domain.  By default,
        ``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
    kx, ky : ints, optional
        Degrees of the bivariate spline. Default is 3.
    s : float, optional
        Positive smoothing factor defined for estimation condition:
        ``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
        Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
        estimate of the standard deviation of ``z[i]``.
    eps : float, optional
        A threshold for determining the effective rank of an over-determined
        linear system of equations. `eps` should have a value between 0 and 1,
        the default is 1e-16.

    See Also
    --------
    bisplrep : an older wrapping of FITPACK
    bisplev : an older wrapping of FITPACK
    UnivariateSpline : a similar class for univariate spline interpolation
    LSQUnivariateSpline : to create a BivariateSpline using weighted

    Notes
    -----
    The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
    """

    def __init__(self, x, y, z, w=None, bbox=[None] * 4, kx=3, ky=3, s=None,
                 eps=None):
        xb,xe,yb,ye = bbox
        # lwrk2=1: let FITPACK determine the second workspace internally.
        nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,
                                                         xb,xe,yb,ye,
                                                         kx,ky,s=s,
                                                         eps=eps,lwrk2=1)
        if ier in [0,-1,-2]: # normal return
            pass
        else:
            # Non-fatal problem: emit the FITPACK diagnostic but keep the
            # (least-squares) fit that was returned.
            message = _surfit_messages.get(ier,'ier=%s' % (ier))
            warnings.warn(message)
        self.fp = fp
        # Trim the over-allocated knot/coefficient arrays to the sizes
        # actually used by the fit.
        self.tck = tx[:nx],ty[:ny],c[:(nx-kx-1)*(ny-ky-1)]
        self.degrees = kx,ky
class LSQBivariateSpline(BivariateSpline):
    """
    Weighted least-squares bivariate spline approximation.

    Parameters
    ----------
    x, y, z : array_like
        1-D sequences of data points (order is not important).
    tx, ty : array_like
        Strictly ordered 1-D sequences of knots coordinates.
    w : array_like, optional
        Positive 1-D array of weights, of the same length as `x`, `y` and `z`.
    bbox : (4,) array_like, optional
        Sequence of length 4 specifying the boundary of the rectangular
        approximation domain.  By default,
        ``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
    kx, ky : ints, optional
        Degrees of the bivariate spline. Default is 3.
    s : float, optional
        Positive smoothing factor defined for estimation condition:
        ``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
        Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
        estimate of the standard deviation of ``z[i]``.
    eps : float, optional
        A threshold for determining the effective rank of an over-determined
        linear system of equations. `eps` should have a value between 0 and 1,
        the default is 1e-16.

    See Also
    --------
    bisplrep : an older wrapping of FITPACK
    bisplev : an older wrapping of FITPACK
    UnivariateSpline : a similar class for univariate spline interpolation
    SmoothBivariateSpline : create a smoothing BivariateSpline

    Notes
    -----
    The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
    """

    def __init__(self, x, y, z, tx, ty, w=None, bbox=[None]*4, kx=3, ky=3,
                 eps=None):
        # Full knot vectors: user-supplied interior knots plus k+1 boundary
        # knots at each end (filled in by surfit_lsq from bbox).
        nx = 2*kx+2+len(tx)
        ny = 2*ky+2+len(ty)
        tx1 = zeros((nx,),float)
        ty1 = zeros((ny,),float)
        tx1[kx+1:nx-kx-1] = tx
        ty1[ky+1:ny-ky-1] = ty
        xb,xe,yb,ye = bbox
        # First attempt with a minimal second workspace (lwrk2=1).
        tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,\
                                               xb,xe,yb,ye,\
                                               kx,ky,eps,lwrk2=1)
        if ier>10:
            # ier>10 means lwrk2 was too small; FITPACK reports the required
            # workspace size in ier, so retry with that value.
            tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,\
                                                   xb,xe,yb,ye,\
                                                   kx,ky,eps,lwrk2=ier)
        if ier in [0,-1,-2]: # normal return
            pass
        else:
            if ier<-2:
                # Rank-deficient system: report the computed deficiency.
                deficiency = (nx-kx-1)*(ny-ky-1)+ier
                message = _surfit_messages.get(-3) % (deficiency)
            else:
                message = _surfit_messages.get(ier, 'ier=%s' % (ier))
            warnings.warn(message)
        self.fp = fp
        self.tck = tx1, ty1, c
        self.degrees = kx, ky
class RectBivariateSpline(BivariateSpline):
    """
    Bivariate spline approximation over a rectangular mesh.

    Can be used for both smoothing and interpolating data.

    Parameters
    ----------
    x,y : array_like
        1-D arrays of coordinates in strictly ascending order.
    z : array_like
        2-D array of data with shape (x.size,y.size).
    bbox : array_like, optional
        Sequence of length 4 specifying the boundary of the rectangular
        approximation domain.  By default,
        ``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
    kx, ky : ints, optional
        Degrees of the bivariate spline. Default is 3.
    s : float, optional
        Positive smoothing factor defined for estimation condition:
        ``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
        Default is ``s=0``, which is for interpolation.

    See Also
    --------
    SmoothBivariateSpline : a smoothing bivariate spline for scattered data
    bisplrep : an older wrapping of FITPACK
    bisplev : an older wrapping of FITPACK
    UnivariateSpline : a similar class for univariate spline interpolation
    """

    def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0):
        x, y = ravel(x), ravel(y)
        if not all(diff(x) > 0.0):
            raise TypeError('x must be strictly increasing')
        if not all(diff(y) > 0.0):
            raise TypeError('y must be strictly increasing')
        # NOTE(review): these min/max checks look redundant given the
        # strict-increase checks above -- presumably kept as belt-and-braces.
        if not ((x.min() == x[0]) and (x.max() == x[-1])):
            raise TypeError('x must be strictly ascending')
        if not ((y.min() == y[0]) and (y.max() == y[-1])):
            raise TypeError('y must be strictly ascending')
        if not x.size == z.shape[0]:
            raise TypeError('x dimension of z must have same number of '
                            'elements as x')
        if not y.size == z.shape[1]:
            raise TypeError('y dimension of z must have same number of '
                            'elements as y')
        # regrid_smth expects the gridded data flattened to 1-D.
        z = ravel(z)
        xb, xe, yb, ye = bbox
        nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb,
                                                          ye, kx, ky, s)
        if not ier in [0, -1, -2]:
            # Unlike the scattered-data classes, gridded fitting treats any
            # other code as a hard error.
            msg = _surfit_messages.get(ier, 'ier=%s' % (ier))
            raise ValueError(msg)
        self.fp = fp
        # Trim knot/coefficient arrays to their used lengths.
        self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)]
        self.degrees = kx, ky
_spherefit_messages = _surfit_messages.copy()
_spherefit_messages[10] = """
ERROR. On entry, the input data are controlled on validity. The following
restrictions must be satisfied:
-1<=iopt<=1, m>=2, ntest>=8 ,npest >=8, 0<eps<1,
0<=teta(i)<=pi, 0<=phi(i)<=2*pi, w(i)>0, i=1,...,m
lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m
kwrk >= m+(ntest-7)*(npest-7)
if iopt=-1: 8<=nt<=ntest , 9<=np<=npest
0<tt(5)<tt(6)<...<tt(nt-4)<pi
0<tp(5)<tp(6)<...<tp(np-4)<2*pi
if iopt>=0: s>=0
if one of these conditions is found to be violated,control
is immediately repassed to the calling program. in that
case there is no approximation returned."""
_spherefit_messages[-3] = """
WARNING. The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank
deficient system (deficiency=%i, rank=%i). Especially if the rank
deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large,
the results may be inaccurate. They could also seriously depend on
the value of eps."""
class SphereBivariateSpline(_BivariateSplineBase):
    """
    Bivariate spline s(x,y) of degrees 3 on a sphere, calculated from a
    given set of data points (theta,phi,r).

    .. versionadded:: 0.11.0

    See Also
    --------
    bisplrep, bisplev : an older wrapping of FITPACK
    UnivariateSpline : a similar class for univariate spline interpolation
    SmoothUnivariateSpline :
        to create a BivariateSpline through the given points
    LSQUnivariateSpline :
        to create a BivariateSpline using weighted least-squares fitting
    """

    def __call__(self, theta, phi):
        """ Evaluate the spline at the grid ponts defined by the coordinate
        arrays theta, phi.

        Both angles are validated against the spherical domain
        theta in [0, pi], phi in [0, 2*pi] before evaluation.
        """
        theta = np.asarray(theta)
        phi = np.asarray(phi)
        # empty input yields empty output
        if (theta.size == 0) and (phi.size == 0):
            return array([])
        if theta.min() < 0. or theta.max() > np.pi:
            raise ValueError("requested theta out of bounds.")
        if phi.min() < 0. or phi.max() > 2. * np.pi:
            raise ValueError("requested phi out of bounds.")
        tx, ty, c = self.tck[:3]
        kx, ky = self.degrees
        # Grid evaluation reuses the planar bispev routine.
        z, ier = dfitpack.bispev(tx, ty, c, kx, ky, theta, phi)
        if not ier == 0:
            raise ValueError("Error code returned by bispev: %s" % ier)
        return z

    def ev(self, thetai, phii):
        """ Evaluate the spline at the points (theta[i], phi[i]),
        i=0,...,len(theta)-1

        Pointwise (not grid) evaluation, with the same domain checks as
        ``__call__``.
        """
        thetai = np.asarray(thetai)
        phii = np.asarray(phii)
        # empty input yields empty output
        if (thetai.size == 0) and (phii.size == 0):
            return array([])
        if thetai.min() < 0. or thetai.max() > np.pi:
            raise ValueError("requested thetai out of bounds.")
        if phii.min() < 0. or phii.max() > 2. * np.pi:
            raise ValueError("requested phii out of bounds.")
        tx, ty, c = self.tck[:3]
        kx, ky = self.degrees
        zi, ier = dfitpack.bispeu(tx, ty, c, kx, ky, thetai, phii)
        if not ier == 0:
            raise ValueError("Error code returned by bispeu: %s" % ier)
        return zi
class SmoothSphereBivariateSpline(SphereBivariateSpline):
    """
    Smooth bivariate spline approximation in spherical coordinates.

    .. versionadded:: 0.11.0

    Parameters
    ----------
    theta, phi, r : array_like
        1-D sequences of data points (order is not important). Coordinates
        must be given in radians. Theta must lie within the interval (0, pi),
        and phi must lie within the interval (0, 2pi).
    w : array_like, optional
        Positive 1-D sequence of weights.  A scalar float is broadcast to
        one weight per observation.
    s : float, optional
        Positive smoothing factor defined for estimation condition:
        ``sum((w(i)*(r(i) - s(theta(i), phi(i))))**2, axis=0) <= s``
        Default ``s=len(w)`` which should be a good value if 1/w[i] is an
        estimate of the standard deviation of r[i].
    eps : float, optional
        A threshold for determining the effective rank of an over-determined
        linear system of equations. `eps` should have a value between 0 and 1,
        the default is 1e-16.

    Notes
    -----
    For more information, see the FITPACK_ site about this function.

    .. _FITPACK: http://www.netlib.org/dierckx/sphere.f

    Examples
    --------
    Suppose we have global data on a coarse grid (the input data does not
    have to be on a grid):

    >>> theta = np.linspace(0., np.pi, 7)
    >>> phi = np.linspace(0., 2*np.pi, 9)
    >>> data = np.empty((theta.shape[0], phi.shape[0]))
    >>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
    >>> data[1:-1,1], data[1:-1,-1] = 1., 1.
    >>> data[1,1:-1], data[-2,1:-1] = 1., 1.
    >>> data[2:-2,2], data[2:-2,-2] = 2., 2.
    >>> data[2,2:-2], data[-3,2:-2] = 2., 2.
    >>> data[3,3:-2] = 3.
    >>> data = np.roll(data, 4, 1)

    We need to set up the interpolator object

    >>> lats, lons = np.meshgrid(theta, phi)
    >>> from scipy.interpolate import SmoothSphereBivariateSpline
    >>> lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(),
                                          data.T.ravel(),s=3.5)

    As a first test, we'll see what the algorithm returns when run on the
    input coordinates

    >>> data_orig = lut(theta, phi)

    Finally we interpolate the data to a finer grid

    >>> fine_lats = np.linspace(0., np.pi, 70)
    >>> fine_lons = np.linspace(0., 2 * np.pi, 90)
    >>> data_smth = lut(fine_lats, fine_lons)

    >>> fig = plt.figure()
    >>> ax1 = fig.add_subplot(131)
    >>> ax1.imshow(data, interpolation='nearest')
    >>> ax2 = fig.add_subplot(132)
    >>> ax2.imshow(data_orig, interpolation='nearest')
    >>> ax3 = fig.add_subplot(133)
    >>> ax3.imshow(data_smth, interpolation='nearest')
    >>> plt.show()
    """

    def __init__(self, theta, phi, r, w=None, s=0., eps=1E-16):
        # Broadcast a scalar weight to one weight per observation.
        # BUG FIX: the original used ``np.issubclass_(w, float)``, which
        # tests class relationships (and returns False for a float
        # *instance*, besides being removed from modern NumPy), so a scalar
        # weight was never actually broadcast.
        if isinstance(w, float):
            w = ones(len(theta)) * w
        nt_, tt_, np_, tp_, c, fp, ier = dfitpack.spherfit_smth(theta, phi,
                                                                r, w=w, s=s,
                                                                eps=eps)
        if ier not in [0, -1, -2]:
            message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
            raise ValueError(message)
        self.fp = fp
        # Trim to the populated knots; the spherical fit is always cubic,
        # so there are (nt-4)*(np-4) coefficients.
        self.tck = tt_[:nt_], tp_[:np_], c[:(nt_ - 4) * (np_ - 4)]
        self.degrees = (3, 3)
class LSQSphereBivariateSpline(SphereBivariateSpline):
    """
    Weighted least-squares bivariate spline approximation in spherical
    coordinates.

    .. versionadded:: 0.11.0

    Parameters
    ----------
    theta, phi, r : array_like
        1-D sequences of data points (order is not important). Coordinates
        must be given in radians. Theta must lie within the interval (0, pi),
        and phi must lie within the interval (0, 2pi).
    tt, tp : array_like
        Strictly ordered 1-D sequences of knots coordinates.
        Coordinates must satisfy ``0 < tt[i] < pi``, ``0 < tp[i] < 2*pi``.
    w : array_like, optional
        Positive 1-D sequence of weights, of the same length as `theta`, `phi`
        and `r`.  A scalar float is broadcast to one weight per observation.
    eps : float, optional
        A threshold for determining the effective rank of an over-determined
        linear system of equations. `eps` should have a value between 0 and 1,
        the default is 1e-16.

    Notes
    -----
    For more information, see the FITPACK_ site about this function.

    .. _FITPACK: http://www.netlib.org/dierckx/sphere.f

    Examples
    --------
    Suppose we have global data on a coarse grid (the input data does not
    have to be on a grid):

    >>> theta = np.linspace(0., np.pi, 7)
    >>> phi = np.linspace(0., 2*np.pi, 9)
    >>> data = np.empty((theta.shape[0], phi.shape[0]))
    >>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
    >>> data[1:-1,1], data[1:-1,-1] = 1., 1.
    >>> data[1,1:-1], data[-2,1:-1] = 1., 1.
    >>> data[2:-2,2], data[2:-2,-2] = 2., 2.
    >>> data[2,2:-2], data[-3,2:-2] = 2., 2.
    >>> data[3,3:-2] = 3.
    >>> data = np.roll(data, 4, 1)

    We need to set up the interpolator object. Here, we must also specify the
    coordinates of the knots to use.

    >>> lats, lons = np.meshgrid(theta, phi)
    >>> knotst, knotsp = theta.copy(), phi.copy()
    >>> knotst[0] += .0001
    >>> knotst[-1] -= .0001
    >>> knotsp[0] += .0001
    >>> knotsp[-1] -= .0001
    >>> from scipy.interpolate import LSQSphereBivariateSpline
    >>> lut = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
                                       data.T.ravel(),knotst,knotsp)

    As a first test, we'll see what the algorithm returns when run on the
    input coordinates

    >>> data_orig = lut(theta, phi)

    Finally we interpolate the data to a finer grid

    >>> fine_lats = np.linspace(0., np.pi, 70)
    >>> fine_lons = np.linspace(0., 2*np.pi, 90)
    >>> data_lsq = lut(fine_lats, fine_lons)

    >>> fig = plt.figure()
    >>> ax1 = fig.add_subplot(131)
    >>> ax1.imshow(data, interpolation='nearest')
    >>> ax2 = fig.add_subplot(132)
    >>> ax2.imshow(data_orig, interpolation='nearest')
    >>> ax3 = fig.add_subplot(133)
    >>> ax3.imshow(data_lsq, interpolation='nearest')
    >>> plt.show()
    """

    def __init__(self, theta, phi, r, tt, tp, w=None, eps=1E-16):
        # Broadcast a scalar weight to one weight per observation.
        # BUG FIX: the original used ``np.issubclass_(w, float)``, which
        # tests class relationships (and returns False for a float
        # *instance*, besides being removed from modern NumPy), so a scalar
        # weight was never actually broadcast.
        if isinstance(w, float):
            w = ones(len(theta)) * w
        # Full knot vectors: four boundary knots at each end of both
        # directions (cubic spline), interior knots in between.
        nt_, np_ = 8 + len(tt), 8 + len(tp)
        tt_, tp_ = zeros((nt_,), float), zeros((np_,), float)
        tt_[4:-4], tp_[4:-4] = tt, tp
        # Leading knots stay at 0; trailing knots pin the domain ends.
        tt_[-4:], tp_[-4:] = np.pi, 2. * np.pi
        tt_, tp_, c, fp, ier = dfitpack.spherfit_lsq(theta, phi, r, tt_, tp_,
                                                     w=w, eps=eps)
        if ier < -2:
            # Rank-deficient but usable: warn with deficiency and rank.
            deficiency = 6 + (nt_ - 8) * (np_ - 7) + ier
            message = _spherefit_messages.get(-3) % (deficiency, -ier)
            warnings.warn(message)
        elif ier not in [0, -1, -2]:
            message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
            raise ValueError(message)
        self.fp = fp
        self.tck = tt_, tp_, c
        self.degrees = (3, 3)
_spfit_messages = _surfit_messages.copy()
_spfit_messages[10] = """
ERROR: on entry, the input data are controlled on validity
the following restrictions must be satisfied.
-1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1,
-1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0.
-1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0.
mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8,
kwrk>=5+mu+mv+nuest+nvest,
lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest)
0< u(i-1)<u(i)< pi,i=2,..,mu,
-pi<=v(1)< pi, v(1)<v(i-1)<v(i)<v(1)+2*pi, i=3,...,mv
if iopt(1)=-1: 8<=nu<=min(nuest,mu+6+iopt(2)+iopt(3))
0<tu(5)<tu(6)<...<tu(nu-4)< pi
8<=nv<=min(nvest,mv+7)
v(1)<tv(5)<tv(6)<...<tv(nv-4)<v(1)+2*pi
the schoenberg-whitney conditions, i.e. there must be
subset of grid co-ordinates uu(p) and vv(q) such that
tu(p) < uu(p) < tu(p+4) ,p=1,...,nu-4
(iopt(2)=1 and iopt(3)=1 also count for a uu-value
tv(q) < vv(q) < tv(q+4) ,q=1,...,nv-4
(vv(q) is either a value v(j) or v(j)+2*pi)
if iopt(1)>=0: s>=0
if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7
if one of these conditions is found to be violated,control is
immediately repassed to the calling program. in that case there is no
approximation returned."""
class RectSphereBivariateSpline(SphereBivariateSpline):
    """
    Bivariate spline approximation over a rectangular mesh on a sphere.

    Can be used for smoothing data.

    .. versionadded:: 0.11.0

    Parameters
    ----------
    u : array_like
        1-D array of latitude coordinates in strictly ascending order.
        Coordinates must be given in radians and lie within the interval
        (0, pi).
    v : array_like
        1-D array of longitude coordinates in strictly ascending order.
        Coordinates must be given in radians, and must lie within (0, 2pi).
    r : array_like
        2-D array of data with shape ``(u.size, v.size)``.
    s : float, optional
        Positive smoothing factor defined for estimation condition
        (``s=0`` is for interpolation).
    pole_continuity : bool or (bool, bool), optional
        Order of continuity at the poles ``u=0`` (``pole_continuity[0]``) and
        ``u=pi`` (``pole_continuity[1]``). The order of continuity at the pole
        will be 1 or 0 when this is True or False, respectively.
        Defaults to False.
    pole_values : float or (float, float), optional
        Data values at the poles ``u=0`` and ``u=pi``. Either the whole
        parameter or each individual element can be None. Defaults to None.
    pole_exact : bool or (bool, bool), optional
        Data value exactness at the poles ``u=0`` and ``u=pi``. If True, the
        value is considered to be the right function value, and it will be
        fitted exactly. If False, the value will be considered to be a data
        value just like the other data values. Defaults to False.
    pole_flat : bool or (bool, bool), optional
        For the poles at ``u=0`` and ``u=pi``, specify whether or not the
        approximation has vanishing derivatives. Defaults to False.

    See Also
    --------
    RectBivariateSpline : bivariate spline approximation over a rectangular
        mesh

    Notes
    -----
    Currently, only the smoothing spline approximation (``iopt[0] = 0`` and
    ``iopt[0] = 1`` in the FITPACK routine) is supported.  The exact
    least-squares spline approximation is not implemented yet.

    When actually performing the interpolation, the requested `v` values must
    lie within the same length 2pi interval that the original `v` values were
    chosen from.

    For more information, see the FITPACK_ site about this function.

    .. _FITPACK: http://www.netlib.org/dierckx/spgrid.f

    Examples
    --------
    Suppose we have global data on a coarse grid

    >>> lats = np.linspace(10, 170, 9) * np.pi / 180.
    >>> lons = np.linspace(0, 350, 18) * np.pi / 180.
    >>> data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
                      np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T

    We want to interpolate it to a global one-degree grid

    >>> new_lats = np.linspace(1, 180, 180) * np.pi / 180
    >>> new_lons = np.linspace(1, 360, 360) * np.pi / 180
    >>> new_lats, new_lons = np.meshgrid(new_lats, new_lons)

    We need to set up the interpolator object

    >>> from scipy.interpolate import RectSphereBivariateSpline
    >>> lut = RectSphereBivariateSpline(lats, lons, data)

    Finally we interpolate the data.  The `RectSphereBivariateSpline` object
    only takes 1-D arrays as input, therefore we need to do some reshaping.

    >>> data_interp = lut.ev(new_lats.ravel(),
    ...                      new_lons.ravel()).reshape((360, 180)).T

    Looking at the original and the interpolated data, one can see that the
    interpolant reproduces the original data very well:

    >>> fig = plt.figure()
    >>> ax1 = fig.add_subplot(211)
    >>> ax1.imshow(data, interpolation='nearest')
    >>> ax2 = fig.add_subplot(212)
    >>> ax2.imshow(data_interp, interpolation='nearest')
    >>> plt.show()

    Chosing the optimal value of ``s`` can be a delicate task. Recommended
    values for ``s`` depend on the accuracy of the data values.  If the user
    has an idea of the statistical errors on the data, she can also find a
    proper estimate for ``s``. By assuming that, if she specifies the
    right ``s``, the interpolator will use a spline ``f(u,v)`` which exactly
    reproduces the function underlying the data, she can evaluate
    ``sum((r(i,j)-s(u(i),v(j)))**2)`` to find a good estimate for this ``s``.
    For example, if she knows that the statistical errors on her
    ``r(i,j)``-values are not greater than 0.1, she may expect that a good
    ``s`` should have a value not larger than ``u.size * v.size * (0.1)**2``.

    If nothing is known about the statistical error in ``r(i,j)``, ``s`` must
    be determined by trial and error.  The best is then to start with a very
    large value of ``s`` (to determine the least-squares polynomial and the
    corresponding upper bound ``fp0`` for ``s``) and then to progressively
    decrease the value of ``s`` (say by a factor 10 in the beginning, i.e.
    ``s = fp0 / 10, fp0 / 100, ...``  and more carefully as the approximation
    shows more detail) to obtain closer fits.

    The interpolation results for different values of ``s`` give some insight
    into this process:

    >>> fig2 = plt.figure()
    >>> s = [3e9, 2e9, 1e9, 1e8]
    >>> for ii in xrange(len(s)):
    >>>     lut = RectSphereBivariateSpline(lats, lons, data, s=s[ii])
    >>>     data_interp = lut.ev(new_lats.ravel(),
    ...                          new_lons.ravel()).reshape((360, 180)).T
    >>>     ax = fig2.add_subplot(2, 2, ii+1)
    >>>     ax.imshow(data_interp, interpolation='nearest')
    >>>     ax.set_title("s = %g" % s[ii])
    >>> plt.show()
    """

    def __init__(self, u, v, r, s=0., pole_continuity=False, pole_values=None,
                 pole_exact=False, pole_flat=False):
        # iopt[0]=0 selects the smoothing-spline path; iopt[1:] holds the
        # pole-continuity flags filled in below.
        iopt = np.array([0, 0, 0], dtype=int)
        # ider encodes the pole treatment: [-1, 0, -1, 0] means "no data
        # value supplied" at either pole until pole_values says otherwise.
        ider = np.array([-1, 0, -1, 0], dtype=int)
        # Normalize all four pole_* options to per-pole 2-tuples.
        if pole_values is None:
            pole_values = (None, None)
        elif isinstance(pole_values, (float, np.float32, np.float64)):
            pole_values = (pole_values, pole_values)
        if isinstance(pole_continuity, bool):
            pole_continuity = (pole_continuity, pole_continuity)
        if isinstance(pole_exact, bool):
            pole_exact = (pole_exact, pole_exact)
        if isinstance(pole_flat, bool):
            pole_flat = (pole_flat, pole_flat)
        r0, r1 = pole_values
        iopt[1:] = pole_continuity
        if r0 is None:
            ider[0] = -1
        else:
            # Booleans coerce to 0/1: exact-fit flag for the u=0 pole value.
            ider[0] = pole_exact[0]
        if r1 is None:
            ider[2] = -1
        else:
            ider[2] = pole_exact[1]
        # Vanishing-derivative flags for each pole.
        ider[1], ider[3] = pole_flat
        u, v = np.ravel(u), np.ravel(v)
        if not np.all(np.diff(u) > 0.0):
            raise TypeError('u must be strictly increasing')
        if not np.all(np.diff(v) > 0.0):
            raise TypeError('v must be strictly increasing')
        if not u.size == r.shape[0]:
            raise TypeError('u dimension of r must have same number of '
                            'elements as u')
        if not v.size == r.shape[1]:
            raise TypeError('v dimension of r must have same number of '
                            'elements as v')
        # pole_flat only makes sense with first-order continuity at the pole.
        if pole_continuity[1] is False and pole_flat[1] is True:
            raise TypeError('if pole_continuity is False, so must be '
                            'pole_flat')
        if pole_continuity[0] is False and pole_flat[0] is True:
            raise TypeError('if pole_continuity is False, so must be '
                            'pole_flat')
        r = np.ravel(r)
        nu, tu, nv, tv, c, fp, ier = dfitpack.regrid_smth_spher(iopt, ider,
                        u.copy(), v.copy(), r.copy(), r0, r1, s)
        if not ier in [0, -1, -2]:
            msg = _spfit_messages.get(ier, 'ier=%s' % (ier))
            raise ValueError(msg)
        self.fp = fp
        # Cubic in both directions: (nu-4)*(nv-4) coefficients.
        self.tck = tu[:nu], tv[:nv], c[:(nu - 4) * (nv-4)]
        self.degrees = (3, 3)
| sargas/scipy | scipy/interpolate/fitpack2.py | Python | bsd-3-clause | 48,821 | [
"Gaussian"
] | a03e67d6ec5d226f10e3677854ce647c8800158127c49ebd00ba594f5cfd9c6a |
import numpy as np
import scipy.misc
import argparse
from PIL import Image, ImageFont, ImageDraw
def pad_to_multiple(text, size):
    """Pad `text` with 'O' characters up to the next multiple of `size`.

    Text whose length is already a multiple of `size` (including the
    empty string) is returned unchanged.
    """
    return text + ((-len(text)) % size) * 'O'


def main():
    """Generate a noisy gradient image with the encoded flag drawn on it."""
    parser = argparse.ArgumentParser(
        description = 'This program will generate a bmp of Gaussian Noise')
    parser.add_argument('width', help='The width of the image',
                        type=int)
    parser.add_argument('height', help='The height of the image',
                        type=int)
    parser.add_argument('inFile', help='The input text, including the flag',
                        type=argparse.FileType('r'))
    parser.add_argument('outFName', help='The name of the output image',
                        nargs='?', default='noiseImg.png')
    args = parser.parse_args()

    #Set up a numpy array to hold our image data (for mathy stuff):
    imgArr = np.zeros((args.width, args.height, 3))

    #First, make a background gradient, from 0,0 to N, N
    for i in range(args.width):
        for j in range(args.height):
            greenVal = np.sin(np.radians((10 * i) % 360)) * 127 + 127
            redVal = np.cos(np.radians((10 * j) % 360)) * 127 + 127
            blueVal = (np.cos(np.radians((j * 360)/args.height
                                         % 360)) * 127 + 127)
            imgArr[i, j, 0] = redVal
            imgArr[i, j, 1] = greenVal
            imgArr[i,j,2] = blueVal

    #Next, corrupt the image with red/green gaussian noise
    # (zero covariance in the blue channel, so blue stays clean).
    cov = np.eye(3,3) * 0.01
    cov[2][2] = 0
    mean = np.zeros(3)
    noiseArr = np.random.multivariate_normal(mean, cov,
                                             (args.width, args.height))
    noiseArr = noiseArr * 127 + 127
    # Average the gradient with the noise field.
    imgArr += noiseArr
    imgArr /= 2
    imgArr = np.uint8(imgArr)

    img = Image.fromarray(imgArr).convert('RGB')
    font = ImageFont.truetype('/usr/share/fonts/truetype/droid/DroidSansMono.ttf', 20)
    width, height = font.getsize('O') # Get width of monospace character
    flagText = args.inFile.read()
    # BUG FIX: '/' was Python-2 integer division; use '//' so the number of
    # characters per row stays an integer under Python 3 as well.
    numUnits = args.width // width - 1
    writeText = encode(flagText)
    #For viciousness, pad to the closest 8 bytes (with 0)
    # BUG FIX: the old code appended ``len(writeText) % 8`` pad characters,
    # which in general does not reach a multiple of 8 (e.g. 13 -> 18).
    writeText = pad_to_multiple(writeText, 8)

    # Draw the encoded text character by character, wrapping at row width.
    draw = ImageDraw.Draw(img)
    xCounter = 0
    yCounter = 0
    for char in writeText:
        draw.text((xCounter * width, yCounter * height), char,
                  font=font)
        xCounter += 1
        if xCounter > numUnits:
            xCounter = 0
            yCounter += 1

    img.show()
    img.save(args.outFName)
#This will encode the input character string into a string of 0/O symbols
# using seven-bit ASCII: a binary 0 is written as 'O', a binary 1 as '0'.
def encode(text):
    """Encode `text` as a string of 'O'/'0' symbols using 7-bit ASCII.

    Each character is expanded to its seven-bit binary code; a binary 0
    is written as the letter 'O' and a binary 1 as the digit '0'.
    The empty string encodes to the empty string.
    """
    #Define the output string:
    retStr = ''
    #Iterate through each character in the string:
    for char in text:
        # Render the code point as a zero-padded 7-digit binary string
        # directly; the old ``'%07d' % int(bin(...)[2:])`` round-tripped
        # the binary digits through a decimal format, which only worked
        # by accident.
        binChar = format(ord(char), '07b')
        #replace all of the 0's with O
        retStr += binChar.replace('0', 'O').replace('1', '0')
    return retStr
def decode(ohStr):
    """Invert ``encode``: map 'O' -> binary 0, '0' -> binary 1 and read
    the result seven bits at a time, one ASCII character per chunk.
    """
    #First, change to 0's and 1's:
    ohStr = ohStr.replace('0', '1').replace('O', '0')
    retStr = ''
    # Consume the bit string seven bits at a time; stepping the range
    # directly replaces the old per-index loop with its ``i % 7`` test
    # and its dead ``count`` variable.
    for i in range(0, len(ohStr), 7):
        retStr += chr(int(ohStr[i: i + 7], 2))
    return retStr
# Run the generator only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| mitre-cyber-academy/2013-forensics-500 | src/genNoiseImg.py | Python | apache-2.0 | 3,106 | [
"Gaussian"
] | 31c615ea9ba082c7e0d14e41e340e5e73224cee15e28c50b3633e2c27f0e2330 |
# -*- coding: utf-8 -*-
from __future__ import annotations
from statsmodels.compat.pandas import (
Appender,
Substitution,
call_cached_func,
to_numpy,
)
from collections.abc import Iterable
import datetime
import datetime as dt
from types import SimpleNamespace
from typing import Any, Literal, Sequence, cast
import warnings
import numpy as np
import pandas as pd
from scipy.stats import gaussian_kde, norm
import statsmodels.base.wrapper as wrap
from statsmodels.iolib.summary import Summary
from statsmodels.regression.linear_model import OLS
from statsmodels.tools import eval_measures
from statsmodels.tools.decorators import cache_readonly, cache_writable
from statsmodels.tools.docstring import Docstring, remove_parameters
from statsmodels.tools.sm_exceptions import SpecificationWarning
from statsmodels.tools.typing import (
ArrayLike,
ArrayLike1D,
ArrayLike2D,
Float64Array,
NDArray,
)
from statsmodels.tools.validation import (
array_like,
bool_like,
int_like,
string_like,
)
from statsmodels.tsa.arima_process import arma2ma
from statsmodels.tsa.base import tsa_model
from statsmodels.tsa.base.prediction import PredictionResults
from statsmodels.tsa.deterministic import (
DeterministicProcess,
DeterministicTerm,
Seasonality,
TimeTrend,
)
from statsmodels.tsa.tsatools import freq_to_period, lagmat
# Public names exported by this module.
__all__ = ["AR", "AutoReg"]
# Deprecation message historically shown to users of the legacy AR class.
AR_DEPRECATION_WARN = """
statsmodels.tsa.AR has been deprecated in favor of statsmodels.tsa.AutoReg and
statsmodels.tsa.SARIMAX.
AutoReg adds the ability to specify exogenous variables, include time trends,
and add seasonal dummies. The AutoReg API differs from AR since the model is
treated as immutable, and so the entire specification including the lag
length must be specified when creating the model. This change is too
substantial to incorporate into the existing AR api. The function
ar_select_order performs lag length selection for AutoReg models.
AutoReg only estimates parameters using conditional MLE (OLS). Use SARIMAX to
estimate ARX and related models using full MLE via the Kalman Filter.
To silence this warning and continue using AR until it is removed, use:
import warnings
warnings.filterwarnings('ignore', 'statsmodels.tsa.ar_model.AR', FutureWarning)
"""
# Error template used when a legacy AR model was re-fit with new settings.
REPEATED_FIT_ERROR = """
Model has been fit using maxlag={0}, method={1}, ic={2}, trend={3}. These
cannot be changed in subsequent calls to `fit`. Instead, use a new instance of
AR.
"""
def sumofsq(x: np.ndarray, axis: int = 0) -> float | np.ndarray:
"""Helper function to calculate sum of squares along first axis"""
return np.sum(x**2, axis=axis)
def _get_period(data: pd.DatetimeIndex | pd.PeriodIndex, index_freq) -> int:
"""Shared helper to get period from frequenc or raise"""
if data.freq:
return freq_to_period(index_freq)
raise ValueError(
"freq cannot be inferred from endog and model includes seasonal "
"terms. The number of periods must be explicitly set when the "
"endog's index does not contain a frequency."
)
class AutoReg(tsa_model.TimeSeriesModel):
    """
    Autoregressive AR-X(p) model

    Estimate an AR-X model using Conditional Maximum Likelihood (OLS).

    Parameters
    ----------
    endog : array_like
        A 1-d endogenous response variable. The dependent variable.
    lags : {None, int, list[int]}
        The number of lags to include in the model if an integer or the
        list of lag indices to include. For example, [1, 4] will only
        include lags 1 and 4 while lags=4 will include lags 1, 2, 3, and 4.
        None excludes all AR lags, and behaves identically to 0.
    trend : {'n', 'c', 't', 'ct'}
        The trend to include in the model:

        * 'n' - No trend.
        * 'c' - Constant only.
        * 't' - Time trend only.
        * 'ct' - Constant and time trend.
    seasonal : bool
        Flag indicating whether to include seasonal dummies in the model. If
        seasonal is True and trend includes 'c', then the first period
        is excluded from the seasonal terms.
    exog : array_like, optional
        Exogenous variables to include in the model. Must have the same number
        of observations as endog and should be aligned so that endog[i] is
        regressed on exog[i].
    hold_back : {None, int}
        Initial observations to exclude from the estimation sample. If None,
        then hold_back is equal to the maximum lag in the model. Set to a
        non-zero value to produce comparable models with different lag
        length. For example, to compare the fit of a model with lags=3 and
        lags=1, set hold_back=3 which ensures that both models are estimated
        using observations 3,...,nobs. hold_back must be >= the maximum lag in
        the model.
    period : {None, int}
        The period of the data. Only used if seasonal is True. This parameter
        can be omitted if using a pandas object for endog that contains a
        recognized frequency.
    missing : str
        Available options are 'none', 'drop', and 'raise'. If 'none', no nan
        checking is done. If 'drop', any observations with nans are dropped.
        If 'raise', an error is raised. Default is 'none'.
    deterministic : DeterministicProcess
        A deterministic process. If provided, trend and seasonal are ignored.
        A warning is raised if trend is not "n" and seasonal is not False.
    old_names : bool
        Flag indicating whether to use the v0.11 names or the v0.12+ names.

        .. deprecated:: 0.13
           old_names is deprecated and will be removed after 0.14 is
           released. You must update any code reliant on the old variable
           names to use the new names.

    See Also
    --------
    statsmodels.tsa.statespace.sarimax.SARIMAX
        Estimation of SARIMAX models using exact likelihood and the
        Kalman Filter.

    Examples
    --------
    >>> import statsmodels.api as sm
    >>> from statsmodels.tsa.ar_model import AutoReg
    >>> data = sm.datasets.sunspots.load_pandas().data['SUNACTIVITY']
    >>> out = 'AIC: {0:0.3f}, HQIC: {1:0.3f}, BIC: {2:0.3f}'

    Start by fitting an unrestricted Seasonal AR model

    >>> res = AutoReg(data, lags = [1, 11, 12]).fit()
    >>> print(out.format(res.aic, res.hqic, res.bic))
    AIC: 5.945, HQIC: 5.970, BIC: 6.007

    An alternative uses seasonal dummies

    >>> res = AutoReg(data, lags=1, seasonal=True, period=11).fit()
    >>> print(out.format(res.aic, res.hqic, res.bic))
    AIC: 6.017, HQIC: 6.080, BIC: 6.175

    Finally, both the seasonal AR structure and dummies can be included

    >>> res = AutoReg(data, lags=[1, 11, 12], seasonal=True, period=11).fit()
    >>> print(out.format(res.aic, res.hqic, res.bic))
    AIC: 5.884, HQIC: 5.959, BIC: 6.071
    """

    # Estimation target (endog after lag/hold_back adjustment), set in
    # _setup_regressors.
    _y: Float64Array
    def __init__(
        self,
        endog: ArrayLike1D,
        lags: int | Sequence[int] | None,
        trend: Literal["n", "c", "t", "ct"] = "c",
        seasonal: bool = False,
        exog: ArrayLike2D | None = None,
        hold_back: int | None = None,
        period: int | None = None,
        missing: str = "none",
        *,
        deterministic: DeterministicProcess | None = None,
        old_names: bool = False,
    ):
        super().__init__(endog, exog, None, None, missing=missing)
        # Validate and normalize user-facing options.
        self._trend = cast(
            Literal["n", "c", "t", "ct"],
            string_like(
                trend, "trend", options=("n", "c", "t", "ct"), optional=False
            ),
        )
        self._seasonal = bool_like(seasonal, "seasonal")
        self._period = int_like(period, "period", optional=True)
        if self._period is None and self._seasonal:
            # Infer the seasonal period from the index frequency when possible.
            self._period = _get_period(self.data, self._index_freq)
        terms: list[DeterministicTerm] = [TimeTrend.from_string(self._trend)]
        if seasonal:
            assert isinstance(self._period, int)
            terms.append(Seasonality(self._period))
        if hasattr(self.data.orig_endog, "index"):
            index = self.data.orig_endog.index
        else:
            index = np.arange(self.data.endog.shape[0])
        self._user_deterministic = False
        if deterministic is not None:
            # A user-supplied process replaces trend/seasonal terms entirely.
            if not isinstance(deterministic, DeterministicProcess):
                raise TypeError("deterministic must be a DeterministicProcess")
            self._deterministics = deterministic
            self._user_deterministic = True
        else:
            self._deterministics = DeterministicProcess(
                index, additional_terms=terms
            )
        self._exog_names: list[str] = []
        self._k_ar = 0
        self._old_names = bool_like(old_names, "old_names", optional=False)
        if deterministic is not None and (
            self._trend != "n" or self._seasonal
        ):
            # trend/seasonal are ignored when deterministic is supplied.
            warnings.warn(
                'When using deterministic, trend must be "n" and '
                "seasonal must be False.",
                SpecificationWarning,
                stacklevel=2,
            )
        if self._old_names:
            warnings.warn(
                "old_names will be removed after the 0.14 release. You should "
                "stop setting this parameter and use the new names.",
                FutureWarning,
                stacklevel=2,
            )
        # Validate lag specification and construct the regressor matrices.
        self._lags, self._hold_back = self._check_lags(
            lags, int_like(hold_back, "hold_back", optional=True)
        )
        self._setup_regressors()
        self.nobs = self._y.shape[0]
        self.data.xnames = self.exog_names
@property
def ar_lags(self) -> list[int] | None:
"""The autoregressive lags included in the model"""
lags = list(self._lags)
return None if not lags else lags
@property
def hold_back(self) -> int | None:
"""The number of initial obs. excluded from the estimation sample."""
return self._hold_back
@property
def trend(self) -> Literal["n", "c", "ct", "ctt"]:
"""The trend used in the model."""
return self._trend
@property
def seasonal(self) -> bool:
"""Flag indicating that the model contains a seasonal component."""
return self._seasonal
@property
def deterministic(self) -> DeterministicProcess | None:
"""The deterministic used to construct the model"""
return self._deterministics if self._user_deterministic else None
@property
def period(self) -> int | None:
"""The period of the seasonal component."""
return self._period
@property
def df_model(self) -> int:
"""The model degrees of freedom."""
return self._x.shape[1]
@property
def exog_names(self) -> list[str] | None:
"""Names of exogenous variables included in model"""
return self._exog_names
    def initialize(self) -> None:
        """Initialize the model (no-op)."""
        # Required by the base-class interface; all setup happens in
        # __init__, so there is nothing to do here.
        pass
def _check_lags(
self, lags: int | Sequence[int] | None, hold_back: int | None
) -> tuple[list[int], int]:
if lags is None:
_lags: list[int] = []
self._maxlag = 0
elif isinstance(lags, Iterable):
_lags = []
for lag in lags:
val = int_like(lag, "lags")
assert isinstance(val, int)
_lags.append(val)
_lags_arr: NDArray = np.array(sorted(_lags))
print(_lags_arr)
if (
np.any(_lags_arr < 1)
or np.unique(_lags_arr).shape[0] != _lags_arr.shape[0]
):
raise ValueError(
"All values in lags must be positive and distinct."
)
self._maxlag = np.max(_lags_arr)
_lags = [int(v) for v in _lags_arr]
else:
val = int_like(lags, "lags")
assert isinstance(val, int)
self._maxlag = val
if self._maxlag < 0:
raise ValueError("lags must be a non-negative scalar.")
_lags_arr = np.arange(1, self._maxlag + 1)
_lags = [int(v) for v in _lags_arr]
if hold_back is None:
hold_back = self._maxlag
if hold_back < self._maxlag:
raise ValueError(
"hold_back must be >= lags if lags is an int or"
"max(lags) if lags is array_like."
)
return _lags, int(hold_back)
    def _setup_regressors(self) -> None:
        """Construct the regressor matrix (deterministics, lags, exog) and target."""
        maxlag = self._maxlag
        hold_back = self._hold_back
        exog_names = []
        endog_names = self.endog_names
        # Full lag matrix and the aligned dependent variable.
        x, y = lagmat(self.endog, maxlag, original="sep")
        exog_names.extend(
            [endog_names + ".L{0}".format(lag) for lag in self._lags]
        )
        if len(self._lags) < maxlag:
            # Keep only the requested lag columns (lags are 1-based).
            x = x[:, np.asarray(self._lags) - 1]
        self._k_ar = x.shape[1]
        deterministic = self._deterministics.in_sample()
        if deterministic.shape[1]:
            # Deterministic terms precede the AR lag columns.
            x = np.c_[to_numpy(deterministic), x]
            if self._old_names:
                # Reconstruct the pre-0.12 variable names.
                deterministic_names = []
                if "c" in self._trend:
                    deterministic_names.append("intercept")
                if "t" in self._trend:
                    deterministic_names.append("trend")
                if self._seasonal:
                    period = self._period
                    assert isinstance(period, int)
                    names = ["seasonal.{0}".format(i) for i in range(period)]
                    if "c" in self._trend:
                        # First dummy is dropped when a constant is present.
                        names = names[1:]
                    deterministic_names.extend(names)
            else:
                deterministic_names = list(deterministic.columns)
            exog_names = deterministic_names + exog_names
        if self.exog is not None:
            # Exogenous regressors go after the AR lags.
            x = np.c_[x, self.exog]
            exog_names.extend(self.data.param_names)
        # Drop the initial hold_back observations.
        y = y[hold_back:]
        x = x[hold_back:]
        if y.shape[0] < x.shape[1]:
            # More regressors than observations: build an informative error.
            reg = x.shape[1]
            period = self._period
            trend = 0 if self._trend == "n" else len(self._trend)
            if self._seasonal:
                assert isinstance(period, int)
                seas = period - int("c" in self._trend)
            else:
                seas = 0
            lags = len(self._lags)
            nobs = y.shape[0]
            raise ValueError(
                "The model specification cannot be estimated. "
                f"The model contains {reg} regressors ({trend} trend, "
                f"{seas} seasonal, {lags} lags) but after adjustment "
                "for hold_back and creation of the lags, there "
                f"are only {nobs} data points available to estimate "
                "parameters."
            )
        self._y, self._x = y, x
        self._exog_names = exog_names
    def fit(
        self,
        cov_type: str = "nonrobust",
        cov_kwds: dict[str, Any] | None = None,
        use_t: bool = False,
    ) -> AutoRegResultsWrapper:
        """
        Estimate the model parameters.

        Parameters
        ----------
        cov_type : str
            The covariance estimator to use. The most common choices are listed
            below. Supports all covariance estimators that are available
            in ``OLS.fit``.

            * 'nonrobust' - The class OLS covariance estimator that assumes
              homoskedasticity.
            * 'HC0', 'HC1', 'HC2', 'HC3' - Variants of White's
              (or Eiker-Huber-White) covariance estimator. `HC0` is the
              standard implementation. The other make corrections to improve
              the finite sample performance of the heteroskedasticity robust
              covariance estimator.
            * 'HAC' - Heteroskedasticity-autocorrelation robust covariance
              estimation. Supports cov_kwds.

              - `maxlags` integer (required) : number of lags to use.
              - `kernel` callable or str (optional) : kernel
                currently available kernels are ['bartlett', 'uniform'],
                default is Bartlett.
              - `use_correction` bool (optional) : If true, use small sample
                correction.
        cov_kwds : dict, optional
            A dictionary of keyword arguments to pass to the covariance
            estimator. `nonrobust` and `HC#` do not support cov_kwds.
        use_t : bool, optional
            A flag indicating that inference should use the Student's t
            distribution that accounts for model degree of freedom. If False,
            uses the normal distribution. If None, defers the choice to
            the cov_type. It also removes degree of freedom corrections from
            the covariance estimator when cov_type is 'nonrobust'.

        Returns
        -------
        AutoRegResults
            Estimation results.

        See Also
        --------
        statsmodels.regression.linear_model.OLS
            Ordinary Least Squares estimation.
        statsmodels.regression.linear_model.RegressionResults
            See ``get_robustcov_results`` for a detailed list of available
            covariance estimators and options.

        Notes
        -----
        Use ``OLS`` to estimate model parameters and to estimate parameter
        covariance.
        """
        # TODO: Determine correction for degree-of-freedom
        # Special case parameterless model
        if self._x.shape[1] == 0:
            return AutoRegResultsWrapper(
                AutoRegResults(self, np.empty(0), np.empty((0, 0)))
            )
        ols_mod = OLS(self._y, self._x)
        ols_res = ols_mod.fit(
            cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t
        )
        cov_params = ols_res.cov_params()
        use_t = ols_res.use_t
        if cov_type == "nonrobust" and not use_t:
            # Remove the OLS degree-of-freedom correction so the
            # covariance matches the MLE scale.
            nobs = self._y.shape[0]
            k = self._x.shape[1]
            scale = nobs / (nobs - k)
            cov_params /= scale
        res = AutoRegResults(
            self,
            ols_res.params,
            cov_params,
            ols_res.normalized_cov_params,
            use_t=use_t,
        )
        return AutoRegResultsWrapper(res)
def _resid(self, params: ArrayLike) -> np.ndarray:
params = array_like(params, "params", ndim=2)
return self._y.squeeze() - (self._x @ params).squeeze()
def loglike(self, params: ArrayLike) -> float:
"""
Log-likelihood of model.
Parameters
----------
params : ndarray
The model parameters used to compute the log-likelihood.
Returns
-------
float
The log-likelihood value.
"""
nobs = self.nobs
resid = self._resid(params)
ssr = resid @ resid
llf = -(nobs / 2) * (np.log(2 * np.pi) + np.log(ssr / nobs) + 1)
return llf
def score(self, params: ArrayLike) -> np.ndarray:
"""
Score vector of model.
The gradient of logL with respect to each parameter.
Parameters
----------
params : ndarray
The parameters to use when evaluating the Hessian.
Returns
-------
ndarray
The score vector evaluated at the parameters.
"""
resid = self._resid(params)
return self._x.T @ resid
def information(self, params: ArrayLike) -> np.ndarray:
"""
Fisher information matrix of model.
Returns -1 * Hessian of the log-likelihood evaluated at params.
Parameters
----------
params : ndarray
The model parameters.
Returns
-------
ndarray
The information matrix.
"""
resid = self._resid(params)
sigma2 = resid @ resid / self.nobs
return (self._x.T @ self._x) * (1 / sigma2)
def hessian(self, params: ArrayLike) -> np.ndarray:
"""
The Hessian matrix of the model.
Parameters
----------
params : ndarray
The parameters to use when evaluating the Hessian.
Returns
-------
ndarray
The hessian evaluated at the parameters.
"""
return -self.information(params)
    def _setup_oos_forecast(
        self, add_forecasts: int, exog_oos: ArrayLike2D
    ) -> np.ndarray:
        """Build the regressor matrix for out-of-sample forecasts.

        Deterministic and exogenous columns are filled; the AR lag columns
        are left as zeros to be filled recursively during forecasting.
        """
        x = np.zeros((add_forecasts, self._x.shape[1]))
        oos_exog = self._deterministics.out_of_sample(steps=add_forecasts)
        n_deterministic = oos_exog.shape[1]
        x[:, :n_deterministic] = to_numpy(oos_exog)
        # skip the AR columns
        loc = n_deterministic + len(self._lags)
        if self.exog is not None:
            exog_oos_a = np.asarray(exog_oos)
            x[:, loc:] = exog_oos_a[:add_forecasts]
        return x
    def _wrap_prediction(
        self, prediction: np.ndarray, start: int, end: int, pad: int
    ) -> pd.Series:
        """Pad predictions with leading NaNs and attach a pandas index.

        Returns a plain ndarray when the original endog was not a pandas
        object (despite the Series annotation).
        """
        prediction = np.hstack([np.full(pad, np.nan), prediction])
        n_values = end - start + pad
        if not isinstance(self.data.orig_endog, (pd.Series, pd.DataFrame)):
            # Array in, array out.
            return prediction[-n_values:]
        index = self._index
        if end > self.endog.shape[0]:
            # Extend the index to cover out-of-sample forecasts.
            freq = getattr(index, "freq", None)
            if freq:
                if isinstance(index, pd.PeriodIndex):
                    index = pd.period_range(index[0], freq=freq, periods=end)
                else:
                    index = pd.date_range(index[0], freq=freq, periods=end)
            else:
                index = pd.RangeIndex(end)
        index = index[start - pad : end]
        prediction = prediction[-n_values:]
        return pd.Series(prediction, index=index)
    def _dynamic_predict(
        self,
        params: ArrayLike,
        start: int,
        end: int,
        dynamic: int,
        num_oos: int,
        exog: Float64Array | None,
        exog_oos: Float64Array | None,
    ) -> pd.Series:
        """Dynamic in-sample and out-of-sample prediction.

        Observations before offset ``dynamic`` use 1-step (static)
        forecasts based on actual data; from that point on, previously
        forecast values are fed back into the AR lag columns.
        """
        reg = []
        hold_back = self._hold_back
        adj = 0
        if start < hold_back:
            # Adjust start and dynamic
            adj = hold_back - start
            start += adj
            # New offset shifts, but must remain non-negative
            dynamic = max(dynamic - adj, 0)
        if (start - hold_back) <= self.nobs:
            # _x is missing hold_back observations, which is why
            # it is shifted by this amount
            is_loc = slice(start - hold_back, end + 1 - hold_back)
            x = self._x[is_loc]
            if exog is not None:
                x = x.copy()
                # Replace final columns
                x[:, -exog.shape[1] :] = exog[start : end + 1]
            reg.append(x)
        if num_oos > 0:
            reg.append(self._setup_oos_forecast(num_oos, exog_oos))
        _reg = np.vstack(reg)
        # Column where the AR lag block starts (after deterministics,
        # before exog).
        det_col_idx = self._x.shape[1] - len(self._lags)
        det_col_idx -= 0 if self.exog is None else self.exog.shape[1]
        # Simple 1-step static forecasts for dynamic observations
        forecasts = np.empty(_reg.shape[0])
        forecasts[:dynamic] = _reg[:dynamic] @ params
        for h in range(dynamic, _reg.shape[0]):
            # Fill in regressor matrix
            for j, lag in enumerate(self._lags):
                fcast_loc = h - lag
                if fcast_loc >= dynamic:
                    val = forecasts[fcast_loc]
                else:
                    # If before the start of the forecasts, use actual values
                    val = self.endog[fcast_loc + start]
                _reg[h, det_col_idx + j] = val
            forecasts[h] = _reg[h : h + 1] @ params
        return self._wrap_prediction(forecasts, start, end + 1 + num_oos, adj)
    def _static_oos_predict(
        self, params: ArrayLike, num_oos: int, exog_oos: ArrayLike2D
    ) -> np.ndarray:
        """Out-of-sample forecasts, recursively filling the AR lag columns."""
        new_x = self._setup_oos_forecast(num_oos, exog_oos)
        if self._maxlag == 0:
            # No AR terms: forecasts are a pure linear function of new_x.
            return new_x @ params
        forecasts = np.empty(num_oos)
        nexog = 0 if self.exog is None else self.exog.shape[1]
        ar_offset = self._x.shape[1] - nexog - len(self._lags)
        for i in range(num_oos):
            for j, lag in enumerate(self._lags):
                loc = i - lag
                # Negative loc points into the observed sample; otherwise
                # use a previously computed forecast.
                val = self._y[loc] if loc < 0 else forecasts[loc]
                new_x[i, ar_offset + j] = val
            forecasts[i] = new_x[i : i + 1] @ params
        return forecasts
    def _static_predict(
        self,
        params: Float64Array,
        start: int,
        end: int,
        num_oos: int,
        exog: Float64Array | None,
        exog_oos: Float64Array | None,
    ) -> pd.Series:
        """
        Path for static predictions

        Parameters
        ----------
        params : ndarray
            The model parameters
        start : int
            Index of first observation
        end : int
            Index of last in-sample observation. Inclusive, so start:end+1
            in slice notation.
        num_oos : int
            Number of out-of-sample observations, so that the returned size is
            num_oos + (end - start + 1).
        exog : {ndarray, DataFrame}
            Array containing replacement exog values
        exog_oos :  {ndarray, DataFrame}
            Containing forecast exog values
        """
        hold_back = self._hold_back
        nobs = self.endog.shape[0]
        x = np.empty((0, self._x.shape[1]))
        # Adjust start to reflect observations lost
        adj = max(0, hold_back - start)
        start += adj
        if start <= nobs:
            # Use existing regressors
            is_loc = slice(start - hold_back, end + 1 - hold_back)
            x = self._x[is_loc]
            if exog is not None:
                exog_a = np.asarray(exog)
                x = x.copy()
                # Replace final columns
                x[:, -exog_a.shape[1] :] = exog_a[start : end + 1]
        in_sample = x @ params
        if num_oos == 0:  # No out of sample
            return self._wrap_prediction(in_sample, start, end + 1, adj)
        out_of_sample = self._static_oos_predict(params, num_oos, exog_oos)
        prediction = np.hstack((in_sample, out_of_sample))
        return self._wrap_prediction(prediction, start, end + 1 + num_oos, adj)
    def _prepare_prediction(
        self,
        params: ArrayLike,
        exog: ArrayLike2D,
        exog_oos: ArrayLike2D,
        start: int | str | datetime.datetime | pd.Timestamp | None,
        end: int | str | datetime.datetime | pd.Timestamp | None,
    ) -> tuple[
        np.ndarray,
        np.ndarray | pd.DataFrame | None,
        np.ndarray | pd.DataFrame | None,
        int,
        int,
        int,
    ]:
        """Validate prediction inputs and resolve start/end to locations.

        Returns the checked params, the (possibly DataFrame) exog arrays,
        and the integer start, end and number of out-of-sample periods.
        """
        params = array_like(params, "params")
        assert isinstance(params, np.ndarray)
        # DataFrames pass through unchanged; other inputs are validated as
        # 2-d arrays.
        if isinstance(exog, pd.DataFrame):
            _exog = exog
        else:
            _exog = array_like(exog, "exog", ndim=2, optional=True)
        if isinstance(exog_oos, pd.DataFrame):
            _exog_oos = exog_oos
        else:
            _exog_oos = array_like(exog_oos, "exog_oos", ndim=2, optional=True)
        start = 0 if start is None else start
        end = self._index[-1] if end is None else end
        start, end, num_oos, _ = self._get_prediction_index(start, end)
        return params, _exog, _exog_oos, start, end, num_oos
    def _parse_dynamic(self, dynamic, start):
        """Convert a dynamic specification to an offset relative to start.

        Accepts a date-like label (located in the model index), True
        (all forecasts dynamic, offset 0) or an integer offset.

        Raises
        ------
        ValueError
            If the resolved offset is negative.
        """
        if isinstance(
            dynamic, (str, bytes, pd.Timestamp, dt.datetime, pd.Period)
        ):
            dynamic_loc, _, _ = self._get_index_loc(dynamic)
            # Adjust since relative to start
            dynamic_loc -= start
        elif dynamic is True:
            # if True, all forecasts are dynamic
            dynamic_loc = 0
        else:
            dynamic_loc = int(dynamic)
        # At this point dynamic is an offset relative to start
        # and it must be non-negative
        if dynamic_loc < 0:
            raise ValueError(
                "Dynamic prediction cannot begin prior to the "
                "first observation in the sample."
            )
        return dynamic_loc
    def predict(
        self,
        params: ArrayLike,
        start: int | str | datetime.datetime | pd.Timestamp | None = None,
        end: int | str | datetime.datetime | pd.Timestamp | None = None,
        dynamic: bool | int = False,
        exog: ArrayLike2D | None = None,
        exog_oos: ArrayLike2D | None = None,
    ) -> pd.Series:
        """
        In-sample prediction and out-of-sample forecasting.

        Parameters
        ----------
        params : array_like
            The fitted model parameters.
        start : int, str, or datetime, optional
            Zero-indexed observation number at which to start forecasting,
            i.e., the first forecast is start. Can also be a date string to
            parse or a datetime type. Default is the zeroth observation.
        end : int, str, or datetime, optional
            Zero-indexed observation number at which to end forecasting, i.e.,
            the last forecast is end. Can also be a date string to
            parse or a datetime type. However, if the dates index does not
            have a fixed frequency, end must be an integer index if you
            want out-of-sample prediction. Default is the last observation in
            the sample. Unlike standard python slices, end is inclusive so
            that all the predictions [start, start+1, ..., end-1, end] are
            returned.
        dynamic : {bool, int, str, datetime, Timestamp}, optional
            Integer offset relative to `start` at which to begin dynamic
            prediction. Prior to this observation, true endogenous values
            will be used for prediction; starting with this observation and
            continuing through the end of prediction, forecasted endogenous
            values will be used instead. Datetime-like objects are not
            interpreted as offsets. They are instead used to find the index
            location of `dynamic` which is then used to compute the offset.
        exog : array_like
            A replacement exogenous array. Must have the same shape as the
            exogenous data array used when the model was created.
        exog_oos : array_like
            An array containing out-of-sample values of the exogenous variable.
            Must have the same number of columns as the exog used when the
            model was created, and at least as many rows as the number of
            out-of-sample forecasts.

        Returns
        -------
        predictions : {ndarray, Series}
            Array of in-sample predictions and / or out-of-sample forecasts.
        """
        params, exog, exog_oos, start, end, num_oos = self._prepare_prediction(
            params, exog, exog_oos, start, end
        )
        if self.exog is None and (exog is not None or exog_oos is not None):
            raise ValueError(
                "exog and exog_oos cannot be used when the model "
                "does not contains exogenous regressors."
            )
        elif self.exog is not None:
            if exog is not None and exog.shape != self.exog.shape:
                msg = (
                    "The shape of exog {0} must match the shape of the "
                    "exog variable used to create the model {1}."
                )
                raise ValueError(msg.format(exog.shape, self.exog.shape))
            if (
                exog_oos is not None
                and exog_oos.shape[1] != self.exog.shape[1]
            ):
                msg = (
                    "The number of columns in exog_oos ({0}) must match "
                    "the number of columns in the exog variable used to "
                    "create the model ({1})."
                )
                raise ValueError(
                    msg.format(exog_oos.shape[1], self.exog.shape[1])
                )
            if num_oos > 0 and exog_oos is None:
                raise ValueError(
                    "exog_oos must be provided when producing "
                    "out-of-sample forecasts."
                )
            elif exog_oos is not None and num_oos > exog_oos.shape[0]:
                msg = (
                    "start and end indicate that {0} out-of-sample "
                    "predictions must be computed. exog_oos has {1} rows "
                    "but must have at least {0}."
                )
                raise ValueError(msg.format(num_oos, exog_oos.shape[0]))
        if (isinstance(dynamic, bool) and not dynamic) or self._maxlag == 0:
            # If model has no lags, static and dynamic are identical
            return self._static_predict(
                params, start, end, num_oos, exog, exog_oos
            )
        dynamic = self._parse_dynamic(dynamic, start)
        return self._dynamic_predict(
            params, start, end, dynamic, num_oos, exog, exog_oos
        )
class AR:
    """
    Removed; AutoReg is the replacement.

    See Also
    --------
    AutoReg
        The replacement for AR that improved deterministic modeling
    """

    def __init__(self, *args, **kwargs):
        # Construction always fails; the class exists only to direct users
        # to AutoReg.
        raise NotImplementedError(
            "AR has been removed from statsmodels and replaced with "
            "statsmodels.tsa.ar_model.AutoReg."
        )
class ARResults:
    """
    Removed; AutoRegResults is the replacement.

    See Also
    --------
    AutoReg
    """

    def __init__(self, *args, **kwargs):
        # Construction always fails; the class exists only to direct users
        # to AutoRegResults.
        raise NotImplementedError(
            "AR and ARResults have been removed and replaced by "
            "AutoReg And AutoRegResults."
        )
# Extract the shared prediction-parameter documentation from
# AutoReg.predict so related methods can reuse it in their docstrings.
doc = Docstring(AutoReg.predict.__doc__)
_predict_params = doc.extract_parameters(
    ["start", "end", "dynamic", "exog", "exog_oos"], 8
)
class AutoRegResults(tsa_model.TimeSeriesModelResults):
    """
    Class to hold results from fitting an AutoReg model.

    Parameters
    ----------
    model : AutoReg
        Reference to the model that is fit.
    params : ndarray
        The fitted parameters from the AR Model.
    cov_params : ndarray
        The estimated covariance matrix of the model parameters.
    normalized_cov_params : ndarray
        The array inv(dot(x.T,x)) where x contains the regressors in the
        model.
    scale : float, optional
        An estimate of the scale of the model.
    use_t : bool, optional
        Whether use_t was set in fit
    summary_text : str, optional
        Additional text to append to results summary
    """

    # Class-level default so the cache_writable scale setter works; each
    # instance gets its own dict in __init__.
    _cache: dict[str, Any] = {}  # for scale setter
    def __init__(
        self,
        model,
        params,
        cov_params,
        normalized_cov_params=None,
        scale=1.0,
        use_t=False,
        summary_text="",
    ):
        """Store estimation outputs and cached model attributes."""
        super().__init__(model, params, normalized_cov_params, scale)
        # Per-instance cache (shadows the class-level default).
        self._cache = {}
        self._params = params
        self._nobs = model.nobs
        self._n_totobs = model.endog.shape[0]
        self._df_model = model.df_model
        self._ar_lags = model.ar_lags
        self._use_t = use_t
        if self._ar_lags is not None:
            self._max_lag = max(self._ar_lags)
        else:
            self._max_lag = 0
        self._hold_back = self.model.hold_back
        self.cov_params_default = cov_params
        self._summary_text = summary_text
def initialize(self, model, params, **kwargs):
"""
Initialize (possibly re-initialize) a Results instance.
Parameters
----------
model : Model
The model instance.
params : ndarray
The model parameters.
**kwargs
Any additional keyword arguments required to initialize the model.
"""
self._params = params
self.model = model
@property
def ar_lags(self):
"""The autoregressive lags included in the model"""
return self._ar_lags
@property
def params(self):
"""The estimated parameters."""
return self._params
@property
def df_model(self):
"""The degrees of freedom consumed by the model."""
return self._df_model
@property
def df_resid(self):
"""The remaining degrees of freedom in the residuals."""
return self.nobs - self._df_model
@property
def nobs(self):
"""
The number of observations after adjusting for losses due to lags.
"""
return self._nobs
    @cache_writable()
    def sigma2(self):
        """Residual variance: SSR / nobs (no degree-of-freedom correction)."""
        return 1.0 / self.nobs * sumofsq(self.resid)
    @cache_writable()  # for compatibility with RegressionResults
    def scale(self):
        """Scale of the model; identical to sigma2."""
        return self.sigma2
    @cache_readonly
    def bse(self):  # allow user to specify?
        """
        The standard errors of the estimated parameters.

        Computed as the square root of the diagonal of the parameter
        covariance matrix.
        """
        return np.sqrt(np.diag(self.cov_params()))
    @cache_readonly
    def aic(self):
        r"""
        Akaike Information Criterion using Lutkepohl's definition.

        :math:`-2 llf + 2 (1 + df_{model})`

        NOTE(review): delegates to ``eval_measures.aic`` with
        ``df_model + 1`` parameters (the error variance is counted); the
        earlier docstring showed the :math:`\ln(nobs)` (BIC) penalty --
        confirm against ``eval_measures``.
        """
        # This is based on loglike with dropped constant terms ?
        # Lutkepohl
        # return np.log(self.sigma2) + 1./self.model.nobs * self.k_ar
        # Include constant as estimated free parameter and double the loss
        # Stata definition
        # nobs = self.nobs
        # return -2 * self.llf/nobs + 2 * (self.k_ar+self.k_trend)/nobs
        return eval_measures.aic(self.llf, self.nobs, self.df_model + 1)
    @cache_readonly
    def hqic(self):
        r"""
        Hannan-Quinn Information Criterion using Lutkepohl's definition.

        :math:`-2 llf + 2 \ln(\ln(nobs)) (1 + df_{model})`
        """
        # Lutkepohl
        # return np.log(self.sigma2)+ 2 * np.log(np.log(nobs))/nobs * self.k_ar
        # R uses all estimated parameters rather than just lags
        # Stata
        # nobs = self.nobs
        # return -2 * self.llf/nobs + 2 * np.log(np.log(nobs))/nobs * \
        #     (self.k_ar + self.k_trend)
        return eval_measures.hqic(self.llf, self.nobs, self.df_model + 1)
@cache_readonly
def fpe(self):
r"""
Final prediction error using Lütkepohl's definition.
:math:`((nobs+df_{model})/(nobs-df_{model})) \sigma^2`
"""
nobs = self.nobs
df_model = self.df_model
# Lutkepohl
return self.sigma2 * ((nobs + df_model) / (nobs - df_model))
    @cache_readonly
    def aicc(self):
        r"""
        Akaike Information Criterion with small sample correction

        Delegates to ``eval_measures.aicc`` with ``df_model + 1``
        parameters (the error variance is counted); see that function for
        the exact correction applied.
        """
        return eval_measures.aicc(self.llf, self.nobs, self.df_model + 1)
    @cache_readonly
    def bic(self):
        r"""
        Bayes Information Criterion

        :math:`-2 llf + \ln(nobs) (1 + df_{model})`
        """
        # Lutkepohl
        # np.log(self.sigma2) + np.log(nobs)/nobs * self.k_ar
        # Include constant as est. free parameter
        # Stata
        # -2 * self.llf/nobs + np.log(nobs)/nobs * (self.k_ar + self.k_trend)
        return eval_measures.bic(self.llf, self.nobs, self.df_model + 1)
    @cache_readonly
    def resid(self):
        """
        The residuals of the model.

        Observed values less fitted values, after dropping the initial
        hold_back observations.
        """
        model = self.model
        endog = model.endog.squeeze()
        return endog[self._hold_back :] - self.fittedvalues
    def _lag_repr(self):
        """Returns poly repr of an AR, (1  -phi1 L -phi2 L^2-...)"""
        ar_lags = self._ar_lags if self._ar_lags is not None else []
        k_ar = len(ar_lags)
        ar_params = np.zeros(self._max_lag + 1)
        # Coefficient on L^0 is 1 by construction.
        ar_params[0] = 1
        df_model = self._df_model
        exog = self.model.exog
        k_exog = exog.shape[1] if exog is not None else 0
        # The AR coefficients sit between the deterministic terms and the
        # exog columns in the parameter vector.
        params = self._params[df_model - k_ar - k_exog : df_model - k_exog]
        for i, lag in enumerate(ar_lags):
            # Excluded lags keep a zero coefficient.
            ar_params[lag] = -params[i]
        return ar_params
    @cache_readonly
    def roots(self):
        """
        The roots of the AR process.

        The roots are the solution to
        (1 - arparams[0]*z - arparams[1]*z**2 -...- arparams[p-1]*z**k_ar) = 0.
        Stability requires that the roots in modulus lie outside the unit
        circle.
        """
        # TODO: Specific to AR
        lag_repr = self._lag_repr()
        if lag_repr.shape[0] == 1:
            # No AR terms: the lag polynomial is the constant 1.
            return np.empty(0)
        return np.roots(lag_repr) ** -1
    @cache_readonly
    def arfreq(self):
        r"""
        Returns the frequency of the AR roots.

        This is the solution, x, to z = abs(z)*exp(2j*np.pi*x) where z are the
        roots.
        """
        # TODO: Specific to AR
        z = self.roots
        # Angle of each root mapped to cycles (fraction of 2*pi).
        return np.arctan2(z.imag, z.real) / (2 * np.pi)
    @cache_readonly
    def fittedvalues(self):
        """
        The in-sample predicted values of the fitted AR model.

        Computed from ``model.predict`` using the estimated parameters,
        dropping the initial hold_back observations.
        """
        return self.model.predict(self.params)[self._hold_back :]
    def test_serial_correlation(self, lags=None, model_df=None):
        """
        Ljung-Box test for residual serial correlation

        Parameters
        ----------
        lags : int
            The maximum number of lags to use in the test. Jointly tests that
            all autocorrelations up to and including lag j are zero for
            j = 1, 2, ..., lags. If None, uses min(10, nobs // 5).
        model_df : int
            The model degree of freedom to use when adjusting the test
            statistic to account for parameter estimation. If None, uses
            the number of AR lags included in the model.

        Returns
        -------
        output : DataFrame
            DataFrame containing three columns: the test statistic, the
            p-value of the test, and the degree of freedom used in the test.

        Notes
        -----
        Null hypothesis is no serial correlation.

        If the test degree-of-freedom is 0 or negative once accounting for
        model_df, then the test statistic's p-value is missing.

        See Also
        --------
        statsmodels.stats.diagnostic.acorr_ljungbox
            Ljung-Box test for serial correlation.
        """
        # Deferred to prevent circular import
        from statsmodels.stats.diagnostic import acorr_ljungbox

        lags = int_like(lags, "lags", optional=True)
        model_df = int_like(model_df, "df_model", optional=True)
        model_df = self.df_model if model_df is None else model_df
        nobs_effective = self.resid.shape[0]
        if lags is None:
            lags = min(nobs_effective // 5, 10)
        test_stats = acorr_ljungbox(
            self.resid,
            lags=lags,
            boxpierce=False,
            model_df=model_df,
        )
        cols = ["Ljung-Box", "LB P-value", "DF"]
        if lags == 1:
            df = max(0, 1 - model_df)
        else:
            # Per-lag degrees of freedom, floored at zero.
            df = np.clip(np.arange(1, lags + 1) - model_df, 0, np.inf)
            df = df.astype(int)
        test_stats["df"] = df
        index = pd.RangeIndex(1, lags + 1, name="Lag")
        return pd.DataFrame(test_stats, columns=cols, index=index)
def test_normality(self):
    """
    Test for normality of standardized residuals.

    Returns
    -------
    Series
        Series containing four values, the test statistic and its p-value,
        the skewness and the kurtosis.

    Notes
    -----
    Null hypothesis is normality.

    See Also
    --------
    statsmodels.stats.stattools.jarque_bera
        The Jarque-Bera test of normality.
    """
    # Import here to avoid a circular dependency at module load time.
    from statsmodels.stats.stattools import jarque_bera

    stat, pvalue, skewness, kurtosis = jarque_bera(self.resid)
    labels = ["Jarque-Bera", "P-value", "Skewness", "Kurtosis"]
    return pd.Series([stat, pvalue, skewness, kurtosis], index=labels)
def test_heteroskedasticity(self, lags=None):
    """
    ARCH-LM test of residual heteroskedasticity

    Parameters
    ----------
    lags : int
        The maximum number of lags to use in the test. Jointly tests that
        all squared autocorrelations up to and including lag j are zero for
        j = 1, 2, ..., lags. If None, uses min(10, nobs // 5).

    Returns
    -------
    Series
        Series containing the test statistic and its p-values.

    See Also
    --------
    statsmodels.stats.diagnostic.het_arch
        ARCH-LM test.
    statsmodels.stats.diagnostic.acorr_lm
        LM test for autocorrelation.
    """
    from statsmodels.stats.diagnostic import het_arch

    lags = int_like(lags, "lags", optional=True)
    nobs_effective = self.resid.shape[0]
    if lags is None:
        # Same default rule as test_serial_correlation.
        lags = min(nobs_effective // 5, 10)
    out = []
    # Run the ARCH-LM test once per maximum lag 1..lags; het_arch returns
    # (lm statistic, lm p-value, f statistic, f p-value) -- only the LM
    # pair is kept here.
    for lag in range(1, lags + 1):
        res = het_arch(self.resid, nlags=lag)
        out.append([res[0], res[1], lag])
    index = pd.RangeIndex(1, lags + 1, name="Lag")
    cols = ["ARCH-LM", "P-value", "DF"]
    return pd.DataFrame(out, columns=cols, index=index)
def diagnostic_summary(self):
    """
    Returns a summary containing standard model diagnostic tests

    Returns
    -------
    Summary
        A summary instance with panels for serial correlation tests,
        normality tests and heteroskedasticity tests.

    See Also
    --------
    test_serial_correlation
        Test models residuals for serial correlation.
    test_normality
        Test models residuals for deviations from normality.
    test_heteroskedasticity
        Test models residuals for conditional heteroskedasticity.
    """
    from statsmodels.iolib.table import SimpleTable

    # Empty table used as visual padding between panels.
    spacer = SimpleTable([""])
    smry = Summary()

    # Panel 1: Ljung-Box serial-correlation tests.  Rows with a
    # non-positive degree of freedom have no valid p-value and are
    # dropped; the panel is omitted entirely if nothing remains.
    sc = self.test_serial_correlation()
    sc = sc.loc[sc.DF > 0]
    values = [[i + 1] + row for i, row in enumerate(sc.values.tolist())]
    data_fmts = ("%10d", "%10.3f", "%10.3f", "%10d")
    if sc.shape[0]:
        tab = SimpleTable(
            values,
            headers=["Lag"] + list(sc.columns),
            title="Test of No Serial Correlation",
            header_align="r",
            data_fmts=data_fmts,
        )
        smry.tables.append(tab)
        smry.tables.append(spacer)

    # Panel 2: Jarque-Bera normality test (single row).
    jb = self.test_normality()
    data_fmts = ("%10.3f", "%10.3f", "%10.3f", "%10.3f")
    tab = SimpleTable(
        [jb.values],
        headers=list(jb.index),
        title="Test of Normality",
        header_align="r",
        data_fmts=data_fmts,
    )
    smry.tables.append(tab)
    smry.tables.append(spacer)

    # Panel 3: ARCH-LM conditional heteroskedasticity tests, one row
    # per lag.
    arch_lm = self.test_heteroskedasticity()
    values = [
        [i + 1] + row for i, row in enumerate(arch_lm.values.tolist())
    ]
    data_fmts = ("%10d", "%10.3f", "%10.3f", "%10d")
    tab = SimpleTable(
        values,
        headers=["Lag"] + list(arch_lm.columns),
        title="Test of Conditional Homoskedasticity",
        header_align="r",
        data_fmts=data_fmts,
    )
    smry.tables.append(tab)
    return smry
# Docstring is inherited from AutoReg.predict with the `params` entry
# removed, since the fitted parameters are supplied automatically.
@Appender(remove_parameters(AutoReg.predict.__doc__, "params"))
def predict(
    self, start=None, end=None, dynamic=False, exog=None, exog_oos=None
):
    # Thin delegation: forward to the model using the stored parameters.
    return self.model.predict(
        self._params,
        start=start,
        end=end,
        dynamic=dynamic,
        exog=exog,
        exog_oos=exog_oos,
    )
def get_prediction(
    self, start=None, end=None, dynamic=False, exog=None, exog_oos=None
):
    """
    Predictions and prediction intervals

    Parameters
    ----------
    start : int, str, or datetime, optional
        Zero-indexed observation number at which to start forecasting,
        i.e., the first forecast is start. Can also be a date string to
        parse or a datetime type. Default is the zeroth observation.
    end : int, str, or datetime, optional
        Zero-indexed observation number at which to end forecasting, i.e.,
        the last forecast is end. Can also be a date string to
        parse or a datetime type. However, if the dates index does not
        have a fixed frequency, end must be an integer index if you
        want out-of-sample prediction. Default is the last observation in
        the sample. Unlike standard python slices, end is inclusive so
        that all the predictions [start, start+1, ..., end-1, end] are
        returned.
    dynamic : {bool, int, str, datetime, Timestamp}, optional
        Integer offset relative to `start` at which to begin dynamic
        prediction. Prior to this observation, true endogenous values
        will be used for prediction; starting with this observation and
        continuing through the end of prediction, forecasted endogenous
        values will be used instead. Datetime-like objects are not
        interpreted as offsets. They are instead used to find the index
        location of `dynamic` which is then used to compute the offset.
    exog : array_like
        A replacement exogenous array. Must have the same shape as the
        exogenous data array used when the model was created.
    exog_oos : array_like
        An array containing out-of-sample values of the exogenous variable.
        Must have the same number of columns as the exog used when the
        model was created, and at least as many rows as the number of
        out-of-sample forecasts.

    Returns
    -------
    PredictionResults
        Prediction results with mean and prediction intervals
    """
    mean = self.predict(
        start=start, end=end, dynamic=dynamic, exog=exog, exog_oos=exog_oos
    )
    # In-sample prediction variance is the innovation variance; missing
    # predictions keep a missing variance.
    mean_var = np.full_like(mean, self.sigma2)
    mean_var[np.isnan(mean)] = np.nan
    start = 0 if start is None else start
    end = self.model._index[-1] if end is None else end
    _, _, oos, _ = self.model._get_prediction_index(start, end)
    if oos > 0:
        # Out-of-sample h-step variance grows with the cumulative sum of
        # squared MA(infinity) coefficients implied by the AR parameters.
        ar_params = self._lag_repr()
        ma = arma2ma(ar_params, np.ones(1), lags=oos)
        mean_var[-oos:] = self.sigma2 * np.cumsum(ma**2)
    if isinstance(mean, pd.Series):
        mean_var = pd.Series(mean_var, index=mean.index)
    return PredictionResults(mean, mean_var)
def forecast(self, steps=1, exog=None):
    """
    Out-of-sample forecasts

    Parameters
    ----------
    steps : {int, str, datetime}, default 1
        If an integer, the number of steps to forecast from the end of the
        sample. Can also be a date string to parse or a datetime type.
        However, if the dates index does not have a fixed frequency,
        steps must be an integer.
    exog : {ndarray, DataFrame}
        Exogenous values to use out-of-sample. Must have same number of
        columns as original exog data and at least `steps` rows

    Returns
    -------
    array_like
        Array of out of in-sample predictions and / or out-of-sample
        forecasts.

    See Also
    --------
    AutoRegResults.predict
        In- and out-of-sample predictions
    AutoRegResults.get_prediction
        In- and out-of-sample predictions and confidence intervals
    """
    # Forecasts begin at the first index past the original sample.
    first_oos = self.model.data.orig_endog.shape[0]
    if not isinstance(steps, (int, np.integer)):
        # Date-like `steps` is an explicit (inclusive) end point.
        last = steps
    else:
        last = first_oos + steps - 1
    return self.predict(
        start=first_oos, end=last, dynamic=False, exog_oos=exog
    )
def _plot_predictions(
    self,
    predictions,
    start,
    end,
    alpha,
    in_sample,
    fig,
    figsize,
):
    """Shared helper for plotting predictions

    Parameters
    ----------
    predictions : PredictionResults
        Results whose ``predicted_mean`` and ``conf_int`` are plotted.
    start, end : {int, str, datetime, None}
        Prediction span; ``None`` defaults to the full sample.
    alpha : {float, None}
        Tail probability for the confidence band; ``None`` disables it.
    in_sample : bool
        Whether to include the in-sample portion of the predictions.
    fig : Figure or None
        Existing figure to draw into, or ``None`` to create one.
    figsize : tuple or None
        Size of a newly created figure.

    Returns
    -------
    Figure
        The figure containing the plot.
    """
    from statsmodels.graphics.utils import _import_mpl, create_mpl_fig

    _import_mpl()
    fig = create_mpl_fig(fig, figsize)
    start = 0 if start is None else start
    end = self.model._index[-1] if end is None else end
    # oos is the number of out-of-sample observations in [start, end].
    _, _, oos, _ = self.model._get_prediction_index(start, end)

    ax = fig.add_subplot(111)
    mean = predictions.predicted_mean
    if not in_sample and oos:
        if isinstance(mean, pd.Series):
            mean = mean.iloc[-oos:]
    elif not in_sample:
        # BUG FIX: the two adjacent string literals previously joined as
        # "noout-of-sample"; a separating space has been added.
        raise ValueError(
            "in_sample is False but there are no "
            "out-of-sample forecasts to plot."
        )
    ax.plot(mean, zorder=2)

    if oos and alpha is not None:
        # Shade the confidence band over the out-of-sample region only.
        ci = np.asarray(predictions.conf_int(alpha))
        lower, upper = ci[-oos:, 0], ci[-oos:, 1]
        label = "{0:.0%} confidence interval".format(1 - alpha)
        x = ax.get_lines()[-1].get_xdata()
        ax.fill_between(
            x[-oos:],
            lower,
            upper,
            color="gray",
            alpha=0.5,
            label=label,
            zorder=1,
        )
    ax.legend(loc="best")
    return fig
# The shared prediction-parameter documentation is substituted into the
# docstring below via the %(predict_params)s placeholder.
@Substitution(predict_params=_predict_params)
def plot_predict(
    self,
    start=None,
    end=None,
    dynamic=False,
    exog=None,
    exog_oos=None,
    alpha=0.05,
    in_sample=True,
    fig=None,
    figsize=None,
):
    """
    Plot in- and out-of-sample predictions

    Parameters
    ----------\n%(predict_params)s
    alpha : {float, None}
        The tail probability not covered by the confidence interval. Must
        be in (0, 1). Confidence interval is constructed assuming normally
        distributed shocks. If None, figure will not show the confidence
        interval.
    in_sample : bool
        Flag indicating whether to include the in-sample period in the
        plot.
    fig : Figure
        An existing figure handle. If not provided, a new figure is
        created.
    figsize: tuple[float, float]
        Tuple containing the figure size values.

    Returns
    -------
    Figure
        Figure handle containing the plot.
    """
    # Compute predictions/intervals, then delegate all drawing to the
    # shared helper.
    predictions = self.get_prediction(
        start=start, end=end, dynamic=dynamic, exog=exog, exog_oos=exog_oos
    )
    return self._plot_predictions(
        predictions, start, end, alpha, in_sample, fig, figsize
    )
def plot_diagnostics(self, lags=10, fig=None, figsize=None):
    """
    Diagnostic plots for standardized residuals

    Parameters
    ----------
    lags : int, optional
        Number of lags to include in the correlogram. Default is 10.
    fig : Figure, optional
        If given, subplots are created in this figure instead of in a new
        figure. Note that the 2x2 grid will be created in the provided
        figure using `fig.add_subplot()`.
    figsize : tuple, optional
        If a figure is created, this argument allows specifying a size.
        The tuple is (width, height).

    Notes
    -----
    Produces a 2x2 plot grid with the following plots (ordered clockwise
    from top left):

    1. Standardized residuals over time
    2. Histogram plus estimated density of standardized residuals, along
       with a Normal(0,1) density plotted for reference.
    3. Normal Q-Q plot, with Normal reference line.
    4. Correlogram

    See Also
    --------
    statsmodels.graphics.gofplots.qqplot
    statsmodels.graphics.tsaplots.plot_acf
    """
    from statsmodels.graphics.utils import _import_mpl, create_mpl_fig

    _import_mpl()
    fig = create_mpl_fig(fig, figsize)
    # Eliminate residuals associated with burned or diffuse likelihoods
    resid = self.resid

    # Top-left: residuals vs time
    ax = fig.add_subplot(221)
    # NOTE(review): the condition checks self.model.data but then reads
    # self.data.dates -- presumably these refer to the same data object;
    # confirm.
    if hasattr(self.model.data, "dates") and self.data.dates is not None:
        x = self.model.data.dates._mpl_repr()
        x = x[self.model.hold_back :]
    else:
        hold_back = self.model.hold_back
        x = hold_back + np.arange(self.resid.shape[0])
    std_resid = resid / np.sqrt(self.sigma2)
    ax.plot(x, std_resid)
    ax.hlines(0, x[0], x[-1], alpha=0.5)
    ax.set_xlim(x[0], x[-1])
    ax.set_title("Standardized residual")

    # Top-right: histogram, Gaussian kernel density, Normal density
    # Can only do histogram and Gaussian kernel density on the non-null
    # elements
    std_resid_nonmissing = std_resid[~(np.isnan(resid))]
    ax = fig.add_subplot(222)
    ax.hist(std_resid_nonmissing, density=True, label="Hist")
    # NOTE(review): the KDE is fit on std_resid, not the non-missing
    # subset -- verify this is intended when residuals contain NaNs.
    kde = gaussian_kde(std_resid)
    xlim = (-1.96 * 2, 1.96 * 2)
    x = np.linspace(xlim[0], xlim[1])
    ax.plot(x, kde(x), label="KDE")
    ax.plot(x, norm.pdf(x), label="N(0,1)")
    ax.set_xlim(xlim)
    ax.legend()
    ax.set_title("Histogram plus estimated density")

    # Bottom-left: QQ plot
    ax = fig.add_subplot(223)
    from statsmodels.graphics.gofplots import qqplot

    qqplot(std_resid, line="s", ax=ax)
    ax.set_title("Normal Q-Q")

    # Bottom-right: Correlogram
    ax = fig.add_subplot(224)
    from statsmodels.graphics.tsaplots import plot_acf

    plot_acf(resid, ax=ax, lags=lags)
    ax.set_title("Correlogram")
    ax.set_ylim(-1, 1)
    return fig
def summary(self, alpha=0.05):
    """
    Summarize the Model

    Parameters
    ----------
    alpha : float, optional
        Significance level for the confidence intervals.

    Returns
    -------
    smry : Summary instance
        This holds the summary table and text, which can be printed or
        converted to various output formats.

    See Also
    --------
    statsmodels.iolib.summary.Summary
    """
    model = self.model

    title = model.__class__.__name__ + " Model Results"
    method = "Conditional MLE"
    # get sample
    start = self._hold_back
    if self.data.dates is not None:
        dates = self.data.dates
        sample = [dates[start].strftime("%m-%d-%Y")]
        sample += ["- " + dates[-1].strftime("%m-%d-%Y")]
    else:
        sample = [str(start), str(len(self.data.orig_endog))]
    # Build the model description: prefixes for seasonal / restricted-lag
    # variants and an "-X" suffix when exogenous regressors are present.
    model = model.__class__.__name__
    if self.model.seasonal:
        model = "Seas. " + model
    if self.ar_lags is not None and len(self.ar_lags) < self._max_lag:
        model = "Restr. " + model
    if self.model.exog is not None:
        model += "-X"

    order = "({0})".format(self._max_lag)
    dep_name = str(self.model.endog_names)
    top_left = [
        ("Dep. Variable:", [dep_name]),
        ("Model:", [model + order]),
        ("Method:", [method]),
        ("Date:", None),
        ("Time:", None),
        ("Sample:", [sample[0]]),
        ("", [sample[1]]),
    ]
    top_right = [
        ("No. Observations:", [str(len(self.model.endog))]),
        ("Log Likelihood", ["%#5.3f" % self.llf]),
        ("S.D. of innovations", ["%#5.3f" % self.sigma2**0.5]),
        ("AIC", ["%#5.3f" % self.aic]),
        ("BIC", ["%#5.3f" % self.bic]),
        ("HQIC", ["%#5.3f" % self.hqic]),
    ]

    smry = Summary()
    smry.add_table_2cols(
        self, gleft=top_left, gright=top_right, title=title
    )
    smry.add_table_params(self, alpha=alpha, use_t=False)

    # Make the roots table
    from statsmodels.iolib.table import SimpleTable

    if self._max_lag:
        arstubs = ["AR.%d" % i for i in range(1, self._max_lag + 1)]
        stubs = arstubs
        roots = self.roots
        freq = self.arfreq
        modulus = np.abs(roots)
        data = np.column_stack((roots.real, roots.imag, modulus, freq))
        roots_table = SimpleTable(
            [
                (
                    "%17.4f" % row[0],
                    "%+17.4fj" % row[1],
                    "%17.4f" % row[2],
                    "%17.4f" % row[3],
                )
                for row in data
            ],
            headers=[
                "            Real",
                "         Imaginary",
                "         Modulus",
                "        Frequency",
            ],
            title="Roots",
            stubs=stubs,
        )
        smry.tables.append(roots_table)
    # Append any extra text (e.g. the note added by `apply`).
    if self._summary_text:
        extra_txt = smry.extra_txt if smry.extra_txt is not None else []
        smry.add_extra_txt(extra_txt + [self._summary_text])
    return smry
def apply(self, endog, exog=None, refit=False, fit_kwargs=None):
    """
    Apply the fitted parameters to new data unrelated to the original data

    Creates a new result object using the current fitted parameters,
    applied to a completely new dataset that is assumed to be unrelated to
    the model's original data. The new results can then be used for
    analysis or forecasting.

    Parameters
    ----------
    endog : array_like
        New observations from the modeled time-series process.
    exog : array_like, optional
        New observations of exogenous regressors, if applicable.
    refit : bool, optional
        Whether to re-fit the parameters, using the new dataset.
        Default is False (so parameters from the current results object
        are used to create the new results object).
    fit_kwargs : dict, optional
        Keyword arguments to pass to `fit` (if `refit=True`).

    Returns
    -------
    AutoRegResults
        Updated results object containing results for the new dataset.

    See Also
    --------
    AutoRegResults.append
    statsmodels.tsa.statespace.mlemodel.MLEResults.apply

    Notes
    -----
    The `endog` argument to this method should consist of new observations
    that are not necessarily related to the original model's `endog`
    dataset.

    Care is needed when using deterministic processes with cyclical
    components such as seasonal dummies or Fourier series. These
    deterministic components will align to the first observation
    in the data and so it is essential that any new data have the
    same initial period.

    Examples
    --------
    >>> import pandas as pd
    >>> from statsmodels.tsa.ar_model import AutoReg
    >>> index = pd.period_range(start='2000', periods=3, freq='A')
    >>> original_observations = pd.Series([1.2, 1.5, 1.8], index=index)
    >>> mod = AutoReg(original_observations, lags=1, trend="n")
    >>> res = mod.fit()
    >>> print(res.params)
    y.L1    1.219512
    dtype: float64
    >>> print(res.fittedvalues)
    2001    1.463415
    2002    1.829268
    Freq: A-DEC, dtype: float64
    >>> print(res.forecast(1))
    2003    2.195122
    Freq: A-DEC, dtype: float64

    >>> new_index = pd.period_range(start='1980', periods=3, freq='A')
    >>> new_observations = pd.Series([1.4, 0.3, 1.2], index=new_index)
    >>> new_res = res.apply(new_observations)
    >>> print(new_res.params)
    y.L1    1.219512
    dtype: float64
    >>> print(new_res.fittedvalues)
    1981    1.707317
    1982    0.365854
    Freq: A-DEC, dtype: float64
    >>> print(new_res.forecast(1))
    1983    1.463415
    Freq: A-DEC, dtype: float64
    """
    existing = self.model
    try:
        # Re-anchor any deterministic process on the new data's index so
        # cyclical components line up with the new observations.
        deterministic = existing.deterministic
        if deterministic is not None:
            if isinstance(endog, (pd.Series, pd.DataFrame)):
                index = endog.index
            else:
                index = np.arange(endog.shape[0])
            deterministic = deterministic.apply(index)
        # Clone the model specification onto the new data.
        mod = AutoReg(
            endog,
            lags=existing.ar_lags,
            trend=existing.trend,
            seasonal=existing.seasonal,
            exog=exog,
            hold_back=existing.hold_back,
            period=existing.period,
            deterministic=deterministic,
            old_names=False,
        )
    except Exception as exc:
        # Prepend context while preserving the original traceback.
        # NOTE(review): "occured" should be "occurred" in the message
        # below (user-facing string left unchanged here).
        error = (
            "An exception occured during the creation of the cloned "
            "AutoReg instance when applying the existing model "
            "specification to the new data. The original traceback "
            "appears below."
        )
        exc.args = (error,) + exc.args
        raise exc.with_traceback(exc.__traceback__)

    # exog must be present in both models or in neither.
    if (mod.exog is None) != (existing.exog is None):
        if existing.exog is not None:
            raise ValueError(
                "exog must be provided when the original model contained "
                "exog variables"
            )
        raise ValueError(
            "exog must be None when the original model did not contain "
            "exog variables"
        )
    # When present, the number of exog columns must also match.
    if (
        existing.exog is not None
        and existing.exog.shape[1] != mod.exog.shape[1]
    ):
        raise ValueError(
            f"The number of exog variables passed must match the original "
            f"number of exog values ({existing.exog.shape[1]})"
        )
    if refit:
        fit_kwargs = {} if fit_kwargs is None else fit_kwargs
        return mod.fit(**fit_kwargs)
    smry_txt = (
        "Parameters and standard errors were estimated using a different "
        "dataset and were then applied to this dataset."
    )
    res = AutoRegResults(
        mod,
        self.params,
        self.cov_params_default,
        self.normalized_cov_params,
        use_t=self.use_t,
        summary_text=smry_txt,
    )
    return AutoRegResultsWrapper(res)
def append(self, endog, exog=None, refit=False, fit_kwargs=None):
    """
    Append observations to the ones used to fit the model

    Creates a new result object using the current fitted parameters
    where additional observations are appended to the data used
    to fit the model. The new results can then be used for
    analysis or forecasting.

    Parameters
    ----------
    endog : array_like
        New observations from the modeled time-series process.
    exog : array_like, optional
        New observations of exogenous regressors, if applicable.
    refit : bool, optional
        Whether to re-fit the parameters, using the new dataset.
        Default is False (so parameters from the current results object
        are used to create the new results object).
    fit_kwargs : dict, optional
        Keyword arguments to pass to `fit` (if `refit=True`).

    Returns
    -------
    AutoRegResults
        Updated results object containing results for the new dataset.

    See Also
    --------
    AutoRegResults.apply
    statsmodels.tsa.statespace.mlemodel.MLEResults.append

    Notes
    -----
    The endog and exog arguments to this method must be formatted in the
    same way (e.g. Pandas Series versus Numpy array) as were the endog
    and exog arrays passed to the original model.

    The endog argument to this method should consist of new observations
    that occurred directly after the last element of endog. For any other
    kind of dataset, see the apply method.

    Examples
    --------
    >>> import pandas as pd
    >>> from statsmodels.tsa.ar_model import AutoReg
    >>> index = pd.period_range(start='2000', periods=3, freq='A')
    >>> original_observations = pd.Series([1.2, 1.4, 1.8], index=index)
    >>> mod = AutoReg(original_observations, lags=1, trend="n")
    >>> res = mod.fit()
    >>> print(res.params)
    y.L1    1.235294
    dtype: float64
    >>> print(res.fittedvalues)
    2001    1.482353
    2002    1.729412
    Freq: A-DEC, dtype: float64
    >>> print(res.forecast(1))
    2003    2.223529
    Freq: A-DEC, dtype: float64

    >>> new_index = pd.period_range(start='2003', periods=3, freq='A')
    >>> new_observations = pd.Series([2.1, 2.4, 2.7], index=new_index)
    >>> updated_res = res.append(new_observations)
    >>> print(updated_res.params)
    y.L1    1.235294
    dtype: float64
    >>> print(updated_res.fittedvalues)
    2001    1.482353
    2002    1.729412
    2003    2.223529
    2004    2.594118
    2005    2.964706
    Freq: A-DEC, dtype: float64
    >>> print(updated_res.forecast(1))
    2006    3.335294
    Freq: A-DEC, dtype: float64
    """

    def _check(orig, new, name, use_pandas=True):
        """Validate that `new` matches `orig`'s type/index and merge them."""
        from statsmodels.tsa.statespace.mlemodel import _check_index

        typ = type(orig)
        if not isinstance(new, typ):
            raise TypeError(
                f"{name} must have the same type as the {name} used to "
                f"originally create the model ({typ.__name__})."
            )
        if not use_pandas:
            return np.concatenate([orig, new])
        # Pandas data must also continue the original index.
        start = len(orig)
        end = start + len(new) - 1
        _, _, _, append_ix = self.model._get_prediction_index(start, end)
        _check_index(append_ix, new, title=name)
        return pd.concat([orig, new], axis=0)

    existing = self.model
    # exog must be supplied iff the original model used exog.
    no_exog = existing.exog is None
    if no_exog != (exog is None):
        if no_exog:
            err = (
                "Original model does not contain exog data but exog data "
                "passed"
            )
        else:
            err = "Original model has exog data but not exog data passed"
        raise ValueError(err)
    if isinstance(existing.data.orig_endog, (pd.Series, pd.DataFrame)):
        endog = _check(existing.data.orig_endog, endog, "endog")
    else:
        endog = _check(
            existing.endog, np.asarray(endog), "endog", use_pandas=False
        )
    if isinstance(existing.data.orig_exog, (pd.Series, pd.DataFrame)):
        exog = _check(existing.data.orig_exog, exog, "exog")
    elif exog is not None:
        # BUG FIX: the label was previously "endog", producing error
        # messages that blamed endog for a bad exog argument.
        exog = _check(
            existing.exog, np.asarray(exog), "exog", use_pandas=False
        )
    return self.apply(endog, exog, refit=refit, fit_kwargs=fit_kwargs)
class AutoRegResultsWrapper(wrap.ResultsWrapper):
    """Wrapper attaching pandas metadata handling to AutoRegResults."""

    # No wrapper-specific attributes/methods; inherit the standard
    # time-series wrapping behavior.
    _attrs = {}
    _wrap_attrs = wrap.union_dicts(
        tsa_model.TimeSeriesResultsWrapper._wrap_attrs, _attrs
    )
    _methods = {}
    _wrap_methods = wrap.union_dicts(
        tsa_model.TimeSeriesResultsWrapper._wrap_methods, _methods
    )
# Register the wrapper so AutoRegResults attributes are transparently
# re-wrapped with pandas metadata.
wrap.populate_wrapper(AutoRegResultsWrapper, AutoRegResults)

# Extract the shared parameter docs from AutoReg so ar_select_order can
# splice them into its own docstring via @Substitution.
doc = Docstring(AutoReg.__doc__)
_auto_reg_params = doc.extract_parameters(
    [
        "trend",
        "seasonal",
        "exog",
        "hold_back",
        "period",
        "missing",
        "old_names",
    ],
    4,
)
@Substitution(auto_reg_params=_auto_reg_params)
def ar_select_order(
    endog,
    maxlag,
    ic="bic",
    glob=False,
    trend: Literal["n", "c", "ct", "ctt"] = "c",
    seasonal=False,
    exog=None,
    hold_back=None,
    period=None,
    missing="none",
    old_names=False,
):
    """
    Autoregressive AR-X(p) model order selection.

    Parameters
    ----------
    endog : array_like
        A 1-d endogenous response variable. The independent variable.
    maxlag : int
        The maximum lag to consider.
    ic : {'aic', 'hqic', 'bic'}
        The information criterion to use in the selection.
    glob : bool
        Flag indicating whether to use a global search across all
        combinations of lags. In practice, this option is not
        computationally feasible when maxlag is larger than 15 (or perhaps
        20) since the global search requires fitting 2**maxlag
        models.\n%(auto_reg_params)s

    Returns
    -------
    AROrderSelectionResults
        A results holder containing the model and the complete set of
        information criteria for all models fit.

    Examples
    --------
    >>> import statsmodels.api as sm
    >>> from statsmodels.tsa.ar_model import ar_select_order
    >>> data = sm.datasets.sunspots.load_pandas().data['SUNACTIVITY']

    Determine the optimal lag structure

    >>> mod = ar_select_order(data, maxlag=13)
    >>> mod.ar_lags
    array([1, 2, 3, 4, 5, 6, 7, 8, 9])

    Determine the optimal lag structure with seasonal terms

    >>> mod = ar_select_order(data, maxlag=13, seasonal=True, period=12)
    >>> mod.ar_lags
    array([1, 2, 3, 4, 5, 6, 7, 8, 9])

    Globally determine the optimal lag structure

    >>> mod = ar_select_order(data, maxlag=13, glob=True)
    >>> mod.ar_lags
    array([1, 2, 9])
    """
    # Fit the full-lag model once to obtain the design matrix; candidate
    # models are then estimated by OLS on column subsets.
    full_mod = AutoReg(
        endog,
        maxlag,
        trend=trend,
        seasonal=seasonal,
        exog=exog,
        hold_back=hold_back,
        period=period,
        missing=missing,
        old_names=old_names,
    )
    nexog = full_mod.exog.shape[1] if full_mod.exog is not None else 0
    y, x = full_mod._y, full_mod._x
    # Columns are laid out [deterministic | lags | exog]; base_col is the
    # index of the first lag column.
    base_col = x.shape[1] - nexog - maxlag
    sel = np.ones(x.shape[1], dtype=bool)
    ics: list[tuple[int | tuple[int, ...], tuple[float, float, float]]] = []

    def compute_ics(res):
        """Compute (aic, bic, hqic) from an OLS fit of a candidate model."""
        nobs = res.nobs
        df_model = res.df_model
        sigma2 = 1.0 / nobs * sumofsq(res.resid)
        llf = -nobs * (np.log(2 * np.pi * sigma2) + 1) / 2
        # Build a lightweight stand-in so the cached AutoRegResults IC
        # properties can be reused without a full results object.
        res = SimpleNamespace(
            nobs=nobs, df_model=df_model, sigma2=sigma2, llf=llf
        )

        aic = call_cached_func(AutoRegResults.aic, res)
        bic = call_cached_func(AutoRegResults.bic, res)
        hqic = call_cached_func(AutoRegResults.hqic, res)

        return aic, bic, hqic

    def ic_no_data():
        """Fake mod and results to handle no regressor case"""
        mod = SimpleNamespace(
            nobs=y.shape[0], endog=y, exog=np.empty((y.shape[0], 0))
        )
        llf = OLS.loglike(mod, np.empty(0))
        res = SimpleNamespace(
            resid=y, nobs=y.shape[0], llf=llf, df_model=0, k_constant=0
        )

        return compute_ics(res)

    if not glob:
        # Sequential search: AR(0), AR(1), ..., AR(maxlag) with
        # contiguous lags.
        sel[base_col : base_col + maxlag] = False
        for i in range(maxlag + 1):
            sel[base_col : base_col + i] = True
            if not np.any(sel):
                ics.append((0, ic_no_data()))
                continue
            res = OLS(y, x[:, sel]).fit()
            lags = tuple(j for j in range(1, i + 1))
            lags = 0 if not lags else lags
            ics.append((lags, compute_ics(res)))
    else:
        # Global search over all 2**maxlag lag subsets.  Each integer
        # 0..2**maxlag-1 is turned into a boolean mask over the lag
        # columns by unpacking its bytes to bits; the per-byte reversal
        # below converts the big-endian bit order produced by unpackbits
        # into little-endian so bit j selects lag j+1.
        bits = np.arange(2**maxlag, dtype=np.int32)[:, None]
        bits = bits.view(np.uint8)
        bits = np.unpackbits(bits).reshape(-1, 32)
        for i in range(4):
            bits[:, 8 * i : 8 * (i + 1)] = bits[:, 8 * i : 8 * (i + 1)][
                :, ::-1
            ]
        masks = bits[:, :maxlag]
        for mask in masks:
            sel[base_col : base_col + maxlag] = mask
            if not np.any(sel):
                ics.append((0, ic_no_data()))
                continue
            res = OLS(y, x[:, sel]).fit()
            lags = tuple(np.where(mask)[0] + 1)
            lags = 0 if not lags else lags
            ics.append((lags, compute_ics(res)))

    # Rank candidates by the requested criterion and refit the winner as
    # a proper AutoReg model.
    key_loc = {"aic": 0, "bic": 1, "hqic": 2}[ic]
    ics = sorted(ics, key=lambda x: x[1][key_loc])
    selected_model = ics[0][0]
    mod = AutoReg(
        endog,
        selected_model,
        trend=trend,
        seasonal=seasonal,
        exog=exog,
        hold_back=hold_back,
        period=period,
        missing=missing,
        old_names=old_names,
    )
    return AROrderSelectionResults(mod, ics, trend, seasonal, period)
class AROrderSelectionResults(object):
    """
    Results from an AR order selection

    Contains the information criteria for all fitted model orders.
    """

    def __init__(
        self,
        model: AutoReg,
        ics: list[tuple[int | tuple[int, ...], tuple[float, float, float]]],
        trend: Literal["n", "c", "ct", "ctt"],
        seasonal: bool,
        period: int | None,
    ):
        self._model = model
        self._ics = ics
        self._trend = trend
        self._seasonal = seasonal
        self._period = period
        # Build one mapping per criterion, ordered best-to-worst by that
        # criterion (dicts preserve insertion order).  Each maps the lag
        # specification to the criterion's value.  Uses dict
        # comprehensions instead of dict([...]) for clarity and speed.
        self._aic = {
            key: val[0] for key, val in sorted(ics, key=lambda r: r[1][0])
        }
        self._bic = {
            key: val[1] for key, val in sorted(ics, key=lambda r: r[1][1])
        }
        self._hqic = {
            key: val[2] for key, val in sorted(ics, key=lambda r: r[1][2])
        }

    @property
    def model(self) -> AutoReg:
        """The model selected using the chosen information criterion."""
        return self._model

    @property
    def seasonal(self) -> bool:
        """Flag indicating if a seasonal component is included."""
        return self._seasonal

    @property
    def trend(self) -> Literal["n", "c", "ct", "ctt"]:
        """The trend included in the model selection."""
        return self._trend

    @property
    def period(self) -> int | None:
        """The period of the seasonal component."""
        return self._period

    @property
    def aic(self) -> dict[int | tuple[int, ...], float]:
        """
        The Akaike information criterion for the models fit.

        Returns
        -------
        dict[tuple, float]
        """
        return self._aic

    @property
    def bic(self) -> dict[int | tuple[int, ...], float]:
        """
        The Bayesian (Schwarz) information criteria for the models fit.

        Returns
        -------
        dict[tuple, float]
        """
        return self._bic

    @property
    def hqic(self) -> dict[int | tuple[int, ...], float]:
        """
        The Hannan-Quinn information criteria for the models fit.

        Returns
        -------
        dict[tuple, float]
        """
        return self._hqic

    @property
    def ar_lags(self) -> list[int] | None:
        """The lags included in the selected model."""
        return self._model.ar_lags
| statsmodels/statsmodels | statsmodels/tsa/ar_model.py | Python | bsd-3-clause | 79,633 | [
"Gaussian"
] | 7297f0e0e3e4e9810dbbe4deeffdea7cb4b8679cfe2078444f42b84880ed808a |
from MafiaBot.MafiaRole import MafiaRole
from MafiaBot.MafiaAction import MafiaAction
class Sleepwalker(MafiaRole):
    """A night role that must visit another player; the visit has no effect."""

    def GetRolePM(self):
        # Private role description sent to the player, including the
        # remaining-use limit when one applies (-1 means unlimited).
        ret = 'You are a Sleepwalker. You have to visit another player at night. This action does nothing.'
        if self.limiteduses > -1:
            ret += ' You may only use this ability '+str(self.limiteduses)+' times.'
        return ret

    @staticmethod
    def GetRoleName():
        return 'Sleepwalker'

    @staticmethod
    def GetRoleDescription():
        return 'Sleepwalkers compulsorily visit other players at night. That action accomplishes nothing.'

    def HandleCommand(self, command, param, mb, player):
        # Only react while a night action is still pending for this role.
        if self.requiredaction:
            if command == 'visit':
                # limiteduses == 0 means the ability is exhausted.
                if not self.limiteduses == 0:
                    target = mb.GetPlayer(param)
                    if target is not None:
                        if not target.IsDead():
                            if target is player:
                                return 'You cannot visit yourself!'
                            else:
                                # Queue a decoy VISIT action and mark the
                                # mandatory night action as fulfilled.
                                mb.actionlist.append(MafiaAction(MafiaAction.VISIT, player, target, True))
                                self.requiredaction = False
                                player.UpdateActions()
                                ret = 'You visit '+str(target)+' tonight.'
                                self.limiteduses -= 1
                                if self.limiteduses > -1:
                                    ret += ' You have '+str(self.limiteduses)+' visits remaining.'
                                return ret
                    # NOTE(review): a dead target also falls through to
                    # this message, reporting "Cannot find player" --
                    # confirm that is intended.
                    return 'Cannot find player '+param
        # Unhandled command (or no pending action): defer to other handlers.
        return None

    def BeginNightPhase(self, mb, player):
        # At nightfall, flag the compulsory visit unless uses are spent.
        if not self.limiteduses == 0:
            self.requiredaction = True
            self.mandatoryaction = True
            ret = 'Sleepwalker: [Mandatory!] You have to visit another player tonight. Use !visit <player> to visit that player.'
            if self.limiteduses > -1:
                ret += ' You have '+str(self.limiteduses)+' visits remaining.'
            return ret
        else:
            return ''
| LLCoolDave/MafiaBot | MafiaBot/Roles/Sleepwalker.py | Python | mit | 2,173 | [
"VisIt"
] | a90e01a75ce46659d619e8bd0f5f3d3b9e1ccc3cad42a1a15c25ad6466c588b8 |
# FIXME: to bring back to life
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
import cmd
import sys
from DIRAC.DataManagementSystem.Client.CmdDirCompletion.AbstractFileSystem import DFCFileSystem
from DIRAC.DataManagementSystem.Client.CmdDirCompletion.DirectoryCompletion import DirectoryCompletion
class DirCompletion(cmd.Cmd):
    """Interactive shell exercising DFC directory tab-completion.

    Provides an ``ls`` command with directory-name completion backed by
    the DIRAC File Catalog.
    """

    # Shared catalog client and completion helpers for all instances.
    fc = FileCatalogClient()
    dfc_fs = DFCFileSystem(fc)
    dc = DirectoryCompletion(dfc_fs)

    def do_exit(self, args):
        """Exit the shell."""
        sys.exit(0)

    def _listdir(self, args):
        """Return directory entries for a directory, or the path itself."""
        if self.dfc_fs.is_dir(args):
            return self.dfc_fs.list_dir(args)
        else:
            return [args]

    def _ls(self, args):
        """Best-effort listing: any catalog failure yields an empty list."""
        try:
            return self._listdir(args)
        # BUG FIX: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.
        except Exception:
            return []

    def do_ls(self, args):
        """List the contents of a catalog path."""
        # BUG FIX: converted Python 2 print statements to functions so
        # the module is importable under Python 3.
        print()
        print(" ".join(self._ls(args)))

    def complete_ls(self, text, line, begidx, endidx):
        """Tab-completion hook for `ls`: complete directory names."""
        result = []
        cur_input_line = line.split()
        # Default to the catalog root when no path has been typed yet.
        cur_path = "/"
        if (len(cur_input_line) == 2):
            cur_path = cur_input_line[1]
        result = self.dc.parse_text_line(text, cur_path, "/")
        return result
if __name__ == "__main__":
    # Launch the interactive completion shell.
    cli = DirCompletion()
    cli.cmdloop()
| Andrew-McNab-UK/DIRAC | tests/Integration/DataManagementSystem/FIXME_dfc_dir_completion.py | Python | gpl-3.0 | 1,214 | [
"DIRAC"
] | 9048c9ff52211a33cb112fc0ba659138749217dfdfec4be4d01e28dd76b8fb4e |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
import os
import sys
import pysam
from paleomix.common.sequences import NT_CODES
from paleomix.common.utilities import try_cast
import paleomix.common.argparse as argparse
import paleomix.common.fileutils as fileutils
import paleomix.pipelines.zonkey.common as common
# Number of reference bases fetched per pysam call; bounds peak memory use.
_CHUNK_SIZE = 1000000

# Expanded via str.format in _write_settings (placeholders: Revision, NChroms).
_SETTINGS_TEMPLATE = """
# Database format; is incremented when the format changes
Format: 1
# Revision number; is incremented when the database (but not format) changes
Revision: {Revision}
# Arguments passed to plink
Plink: "--horse"
# Number of autosomal chromosomes; required for e.g. PCA analyses.
# This includes autosomal chromosomes not included in the analyses.
NChroms: {NChroms}
# N bases of padding used for mitochondrial sequences; the last N bases of the
# alignments are expected to be the same as the first N bases, in order to
# allow alignments at this region of the genome, and are combined to generate
# final consensus.
MitoPadding: 30
# The minimum distance between SNPs, assuming an even distribution of SNPs
# across the genome. Used when --treemix-k is set to 'auto', which is the
# default behavior. Value from McCue 2012 (doi:10.1371/journal.pgen.1002451).
SNPDistance: 150000
"""
# Helper script written next to the panel files; packs them into a tarball.
# Expanded via str.replace("{REVISION}", ...) rather than str.format, because
# the bash script itself is full of ${...} parameter expansions.
_BUILD_SH_TEMPLATE = """#!/bin/bash
set -o nounset # Fail on unset variables
set -o errexit # Fail on uncaught non-zero returncodes
MITO_FA="mitochondria.fasta"
if [ ! -e "${MITO_FA}" ];
then
echo "WARNING: Mitochondrial FASTA ('${MITO_FA}') not found!"
MITO_FA=""
fi
SIM_TXT="simulations.txt"
if [ ! -e "${SIM_TXT}" ];
then
echo "WARNING: Simulations ('${SIM_TXT}') not found!"
SIM_TXT=""
fi
EXAMPLES="examples"
if [ ! -d "${EXAMPLES}" ];
then
echo "WARNING: Examples ('${EXAMPLES}') not found!"
EXAMPLES=""
fi
FILENAME="zonkey{REVISION}.tar"
SOURCES="settings.yaml contigs.txt samples.txt ${MITO_FA} ${SIM_TXT} ${EXAMPLES} genotypes.txt build.sh"
rm -vf "${FILENAME}"
if ! tar cvf "${FILENAME}" ${SOURCES};
then
echo "Removing partial files"
rm -vf "${FILENAME}"
exit 1
fi
"""
class ZonkeyError(RuntimeError):
    """Raised on invalid or inconsistent input to the zonkey db builder."""
    pass
def _write_build_sh(args, filename):
    """Write the build.sh helper script used to tar up the panel files.

    Skips silently (with a notice) when the file exists and --overwrite
    was not given.
    """
    sys.stderr.write("Writing %r\n" % (filename,))
    if os.path.exists(filename) and not args.overwrite:
        sys.stderr.write(" File exists; skipping.\n")
        return

    # The tarball name embeds the (date-based) revision string.
    script = _BUILD_SH_TEMPLATE.replace("{REVISION}", args.revision)
    with open(filename, "w") as out:
        out.write(script)
def _write_genotypes(args, data, filename):
    """Write the genotypes table: one row per bi-allelic site across samples.

    Columns are Chrom, Pos (1-based), Ref base, and the concatenated sample
    bases; the header's sample names are ';'-joined.
    """
    sys.stderr.write("Writing %r\n" % (filename,))
    if os.path.exists(filename) and not args.overwrite:
        sys.stderr.write(" File exists; skipping.\n")
        return
    samples = data["samples"]
    # Sorted sample names fix the column order of the genotype strings.
    keys = tuple(sorted(samples))
    ref_handle = pysam.FastaFile(args.reference)
    with open(filename, "w") as handle:
        header = ("Chrom", "Pos", "Ref", ";".join(keys))
        handle.write("%s\n" % ("\t".join(header)))
        for contig, size in sorted(data["contigs"].items()):
            # Skip non-autosomal contigs (autosomes have int keys after
            # try_cast in _read_contigs).
            # NOTE(review): sorted() over mixed int/str contig keys raises
            # TypeError on Python 3 -- confirm all keys here are ints.
            if not isinstance(contig, int):
                continue
            sys.stderr.write(" - %s: 0%%\r" % (contig,))
            for pos in range(0, size, _CHUNK_SIZE):
                sys.stderr.write(" - %s: % 3i%%\r" % (contig, (100 * pos) / size))
                chunks = []
                for key in keys:
                    real_name = samples[key]["contigs"][contig]
                    fasta_handle = samples[key]["handle"]
                    chunk = fasta_handle.fetch(real_name, pos, pos + _CHUNK_SIZE)
                    chunks.append(chunk)
                # NOTE(review): `real_name` leaks from the LAST sample's loop
                # iteration; this only fetches the right reference sequence if
                # the reference uses the same contig naming as that sample --
                # verify, or retain the reference's own contig name instead.
                ref_chunk = ref_handle.fetch(real_name, pos, pos + _CHUNK_SIZE)
                for idx, row in enumerate(zip(*chunks)):
                    # Skip sites where any sample is uncalled.
                    # NOTE(review): lowercase 'n' (soft-masked) is not skipped
                    # here and would hit NT_CODES below -- confirm inputs are
                    # uppercase.
                    if "N" in row:
                        continue
                    # Expand IUPAC ambiguity codes and keep bi-allelic sites only.
                    nucleotides = set()
                    for nuc in row:
                        nucleotides.update(NT_CODES[nuc])
                    if len(nucleotides) == 2:
                        handle.write(
                            "%s\t%i\t%s\t%s\n"
                            % (contig, pos + idx + 1, ref_chunk[idx], "".join(row))
                        )
            sys.stderr.write(" - %s: 100%%\n" % (contig,))
def _write_settings(args, contigs, filename):
    """Write settings.yaml from the template, filling revision and NChroms."""
    sys.stderr.write("Writing %r\n" % (filename,))
    if os.path.exists(filename) and not args.overwrite:
        sys.stderr.write(" File exists; skipping.\n")
        return

    # Tools such as SmartPCA need the highest-numbered chromosome, even if
    # some autosomes are excluded from the analyses themselves.
    max_chrom = max(name for name in contigs if isinstance(name, int))

    with open(filename, "w") as out:
        out.write(_SETTINGS_TEMPLATE.format(Revision=args.revision, NChroms=max_chrom))
def _write_contigs(args, filename):
    """Write contigs.txt: one row per usable reference contig.

    Columns are ID (plink name), Size, Ns (count of uncalled bases and
    gaps), and Checksum (currently always "NA").
    """
    sys.stderr.write("Writing %r\n" % (filename,))
    if os.path.exists(filename) and not args.overwrite:
        sys.stderr.write(" File exists; skipping.\n")
        return

    contigs = _read_contigs(args.reference)
    lines = ["ID\tSize\tNs\tChecksum"]

    # FIX: close the FASTA handle when done (it was previously leaked).
    fasta_handle = pysam.FastaFile(args.reference)
    try:
        # NOTE(review): sorted() over mixed int/str contig keys raises
        # TypeError on Python 3 -- confirm key types from _read_contigs.
        for name, (real_name, size) in sorted(contigs.items()):
            sys.stderr.write(" - %s: 0%%\r" % (name,))
            n_uncalled = 0
            for pos in range(0, size, _CHUNK_SIZE):
                sys.stderr.write(" - %s: % 3i%%\r" % (name, (100 * pos) / size))
                chunk = fasta_handle.fetch(real_name, pos, pos + _CHUNK_SIZE)
                # Count uncalled bases in either case, plus alignment gaps.
                n_uncalled += chunk.count("n")
                n_uncalled += chunk.count("N")
                n_uncalled += chunk.count("-")
            sys.stderr.write(" - %s: 100%%\n" % (name,))
            lines.append("%s\t%i\t%i\t%s" % (name, size, n_uncalled, "NA"))
    finally:
        fasta_handle.close()

    lines.append("")
    with open(filename, "w") as handle:
        handle.write("\n".join(lines))
def _write_samples(args, samples, filename):
    """Write a skeleton samples.txt; group/species/sex fields default to
    '-' / 'NA' and are meant to be filled in by hand afterwards."""
    sys.stderr.write("Writing %r\n" % (filename,))
    if os.path.exists(filename) and not args.overwrite:
        sys.stderr.write(" File exists; skipping.\n")
        return

    rows = ["ID\tGroup(2)\tGroup(3)\tSpecies\tSex\tSampleID\tPublication"]
    rows.extend("%s\t-\t-\tNA\tNA\t%s\tNA" % (name, name) for name in sorted(samples))
    rows.append("")

    with open(filename, "w") as out:
        out.write("\n".join(rows))
def _process_contigs(reference, samples):
    """Validate sample contigs against the reference and flatten bookkeeping.

    Every sample must provide every reference contig, with an identical
    length; afterwards each sample's contig map holds just the sample's own
    contig name, and the reference map holds just the size.

    Raises ZonkeyError on missing contigs or size mismatches.
    """
    # Flatten the reference map from {name: (real_name, size)} to {name: size}.
    ref_contigs = {name: size for name, (_, size) in _read_contigs(reference).items()}

    for sample_name, obs_data in list(samples.items()):
        obs_contigs = obs_data["contigs"]
        for ref_name, ref_size in ref_contigs.items():
            if ref_name not in obs_contigs:
                raise ZonkeyError(
                    "Contig missing for sample %r: %r" % (sample_name, ref_name)
                )
            obs_name, obs_size = obs_contigs[ref_name]
            if obs_size != ref_size:
                raise ZonkeyError(
                    "Contig %r for sample %r has wrong size; "
                    "%i observed vs %i expected"
                    % (obs_name, sample_name, obs_size, ref_size)
                )
            # Keep only the sample-local contig name from here on.
            obs_contigs[ref_name] = obs_name

    return {"samples": samples, "contigs": ref_contigs}
def _read_contigs(filename):
    """Read '<filename>.fai' and map plink contig names to (name, size).

    Contigs whose names do not translate to a plink name are skipped; plink
    names that parse as integers (autosomes) are stored under int keys.

    Raises ZonkeyError when the FASTA index lists a contig name twice.
    """
    table = {}
    with open(filename + ".fai") as handle:
        for line in handle:
            name, size, _ = line.split("\t", 2)
            if name in table:
                raise ZonkeyError(
                    "FASTA file %r contains multiple contigs "
                    "with the same name (%r); this is not "
                    "supported." % (filename, name)
                )
            plink_name = common.contig_name_to_plink_name(name)
            if plink_name is not None:
                table[try_cast(plink_name, int)] = (name, int(size))
    return table
def _collect_samples(reference, filenames):
    """Open each sample FASTA, index its contigs, and validate against the
    reference via _process_contigs.

    Sample names are derived from the filename up to the first '.'.
    Raises ZonkeyError on duplicate names or FASTAs without usable contigs.
    """
    samples = {}
    for fpath in filenames:
        sample_name = os.path.basename(fpath).split(".", 1)[0]
        if sample_name in samples:
            raise ZonkeyError("Duplicate sample name %r" % (fpath,))

        # Opening the file first also ensures that a .fai index exists.
        fasta = pysam.FastaFile(fpath)
        contigs = _read_contigs(fpath)
        if not contigs:
            raise ZonkeyError("No usable contigs found in %r." % (fpath,))

        samples[sample_name] = {"handle": fasta, "contigs": contigs}

    return _process_contigs(reference, samples)
def parse_args(argv):
    """Parse command-line arguments for the 'paleomix zonkey:db' tool.

    Args:
        argv: Command-line arguments, excluding the program name.

    Returns:
        Namespace with 'root', 'reference', 'samples' (list of paths) and
        'overwrite' (bool) attributes.
    """
    parser = argparse.ArgumentParser(prog="paleomix zonkey:db")
    parser.add_argument(
        "root", help="Root directory in which to write reference panel files."
    )
    parser.add_argument("reference", help="Reference genome in FASTA format.")
    parser.add_argument(
        "samples",
        nargs="+",
        help="Samples to include in the reference-panel, in "
        "the form of FASTA files that map one-to-one "
        "to the reference sequences. That is to say "
        "that every position in the sample FASTA must "
        # FIX: corrected typo 'homologus' -> 'homologous' in help text.
        "be homologous to the same position in the "
        "reference sequence.",
    )
    parser.add_argument(
        "--overwrite",
        default=False,
        action="store_true",
        help="If set, the program is allowed to overwrite "
        "already existing output files.",
    )
    return parser.parse_args(argv)
def main(argv):
    """Build the zonkey reference-panel files under args.root.

    Returns 1 on failure, None (exit status 0) on success.
    """
    args = parse_args(argv)
    # Revision string is date-based (YYYYMMDD); embedded in settings.yaml
    # and in the tarball name produced by build.sh.
    args.revision = datetime.datetime.today().strftime("%Y%m%d")
    # Opens/indexes all sample FASTAs and validates them vs the reference.
    data = _collect_samples(args.reference, args.samples)
    if not data:
        return 1
    fileutils.make_dirs(args.root)
    # Each writer skips existing files unless --overwrite was given.
    _write_contigs(args, os.path.join(args.root, "contigs.txt"))
    _write_samples(args, data["samples"], os.path.join(args.root, "samples.txt"))
    _write_settings(args, data["contigs"], os.path.join(args.root, "settings.yaml"))
    _write_genotypes(args, data, os.path.join(args.root, "genotypes.txt"))
    _write_build_sh(args, os.path.join(args.root, "build.sh"))
if __name__ == "__main__":
    # Propagate main()'s return value (None -> exit status 0) to the shell.
    sys.exit(main(sys.argv[1:]))
| MikkelSchubert/paleomix | paleomix/pipelines/zonkey/build_db.py | Python | mit | 11,228 | [
"pysam"
] | a41e309c876512562bd79e89d993fa62fe8c2ecef6aaa5ec327453cfe9c4ecf3 |
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to
# submit large numbers of jobs on supercomputers. It provides a python interface to physical input,
# such as crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential
# programs. It is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
from pytest import mark
def random_matrix(n=10):
    """Yield *n* random, invertible 3x3 matrices with entries in (-5, 5)."""
    from numpy.random import random
    from numpy.linalg import det
    from numpy import abs

    def draw():
        return 10 * (random((3, 3)) - 0.5)

    for _ in range(n):
        candidate = draw()
        # Redraw until comfortably away from singular.
        while abs(det(candidate)) < 1e-4:
            candidate = draw()
        yield candidate
@mark.parametrize('cell', random_matrix(10))
def test_third_order_regression(cell):
    """Regression check: Python and C third_order implementations agree."""
    from numpy import abs
    from pylada.crystal.defects import third_order as pyto
    from pylada.crystal.defects._defects import third_order as cto

    # Same cell and quadrature order must agree to numerical precision.
    assert abs(pyto(cell, 10) - cto(cell, 10)) < 1e-8
| pylada/pylada-light | tests/crystal/test_third_order.py | Python | gpl-3.0 | 1,819 | [
"CRYSTAL",
"VASP"
] | c54533869f56b7a75ca39cb0508a9ab03752d426593c1654b54ed7c6d00cfa38 |
# Orca
#
# Copyright 2010 Joanmarie Diggs.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Commonly-required utility methods needed by -- and potentially
customized by -- application and toolkit scripts. They have
been pulled out from the scripts because certain scripts had
gotten way too large as a result of including these methods."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs."
__license__ = "LGPL"
import pyatspi
import orca.orca_state as orca_state
import orca.scripts.toolkits.Gecko as Gecko
#############################################################################
# #
# Utilities #
# #
#############################################################################
class Utilities(Gecko.Utilities):
    """Thunderbird-specific overrides of the Gecko script utilities."""

    def __init__(self, script):
        """Creates an instance of the Utilities class.

        Arguments:
        - script: the script with which this instance is associated.
        """

        Gecko.Utilities.__init__(self, script)

    #########################################################################
    #                                                                       #
    # Utilities for finding, identifying, and comparing accessibles         #
    #                                                                       #
    #########################################################################

    def documentFrame(self):
        """Returns the document frame that holds the content being shown.

        Overridden here because, unlike Firefox, Thunderbird does not
        arrange multiple open messages in tabs.
        """

        if self.inFindToolbar():
            return Gecko.Utilities.documentFrame(self)

        wanted = [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_EMBEDDED]
        candidate = orca_state.locusOfFocus
        # Walk up from the focused accessible to the enclosing document.
        while candidate:
            if candidate.getRole() in wanted:
                return candidate
            candidate = candidate.parent

        return None

    def isEntry(self, obj):
        """Returns True if we should treat this object as an entry."""

        if not obj:
            return obj
        return obj.getRole() == pyatspi.ROLE_ENTRY

    def isPasswordText(self, obj):
        """Returns True if we should treat this object as password text."""

        if not obj:
            return obj
        return obj.getRole() == pyatspi.ROLE_PASSWORD_TEXT
| Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/pyshared/orca/scripts/apps/Thunderbird/script_utilities.py | Python | gpl-3.0 | 4,032 | [
"ORCA"
] | a1b41a4c8f4143fa64b283034673387dddf032092943a6418cb368def709f59b |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
import numpy as np
import unittest2 as unittest
from pymatgen.analysis.eos import EOS
def have_scipy():
    """Return True when scipy can be imported, False otherwise."""
    try:
        import scipy  # noqa: F401 -- imported only to probe availability
    except ImportError:
        return False
    return True
class EOSTestCase(unittest.TestCase):
    """Smoke-tests EOS fitting across every registered equation-of-state model."""

    def setUp(self):
        # Small synthetic E(V) curve with a minimum inside the sampled range.
        self.volumes = np.array([13.72, 14.83, 16.0, 17.23, 18.52])
        self.energies = np.array([-56.29, -56.41, -56.46, -56.46, -56.42])

    @unittest.skipUnless(have_scipy(), "test_fit requires scipy")
    def test_fit(self):
        """Test EOS fit"""
        # Fit each supported model; only verifies that no exception is raised.
        for eos_name in EOS.MODELS:
            eos = EOS(eos_name=eos_name)
            fit = eos.fit(self.volumes, self.energies)
            print(fit)
            #fit.plot()
if __name__ == "__main__":
    # Run the unittest discovery for this module.
    unittest.main()
| Bismarrck/pymatgen | pymatgen/io/abinit/tests/test_eos.py | Python | mit | 927 | [
"pymatgen"
] | d0c57f7811e6252b4ecc73b8be123071a9912594b3e79d25b69d898638afc623 |
"""Contains time series code
"""
import os
import gzip
import numpy as np
import pandas as pd
from scipy.stats import zscore
from scipy.stats import spearmanr
from scipy.stats import pearsonr
from scipy.stats import multivariate_normal
from scipy.stats import chi2
from ggr.util.parallelize import setup_multiprocessing_queue
from ggr.util.parallelize import run_in_parallel
from ggr.util.utils import run_shell_cmd
def merge_cluster_files(cluster_files, out_file, cluster_name_pos=2):
    """Combine per-method cluster assignments into one indicator table.

    Each input file is a headerless TSV whose first column is a region id;
    membership in that file becomes a 0/1 indicator column named after the
    `cluster_name_pos`-th dot-separated field of the file's basename. An
    "atac_cluster" id encodes the indicator columns as decimal digits, and
    the merged table is written (gzipped, tab-separated) to `out_file`.
    """
    merged = None
    for path in cluster_files:
        assignments = pd.read_table(path, sep='\t', index_col=0, header=None)
        method = os.path.basename(path).split(".")[cluster_name_pos]
        # Indicator column: every region present in this file gets a 1.
        assignments[method] = [1 for _ in range(assignments.shape[0])]
        assignments["region"] = assignments.index
        assignments = assignments[["region", method]]
        if merged is None:
            merged = assignments
        else:
            merged = merged.merge(assignments, how="outer", on="region")
    # Regions absent from a file get a 0 in that file's column.
    merged = merged.fillna(0)
    merged.index = merged["region"]
    del merged["region"]

    # Encode membership pattern as a single integer id (one digit per file)
    # so rows can be sorted by their combination of assignments.
    merged["atac_cluster"] = [0 for _ in range(merged.shape[0])]
    n_methods = merged.shape[1] - 1
    for pos in range(n_methods):
        merged["atac_cluster"] = merged["atac_cluster"] + (10**pos) * (merged.iloc[:, pos])
    merged["atac_cluster"] = merged["atac_cluster"].astype(int)

    merged = merged.sort_values("atac_cluster")
    merged.to_csv(out_file, sep='\t', compression="gzip")

    return
def get_cluster_means(cluster_file, timeseries_file):
    """Return per-cluster mean time-series, z-scored across timepoints.

    `cluster_file` is a TSV (with header) of cluster-id, region-id pairs;
    `timeseries_file` is a TSV (with header, region index) of timepoint
    values. Output is a (n_clusters, n_timepoints) numpy array.

    NOTE(review): assumes clusters are numbered contiguously 1..K.
    """
    assignments = pd.read_table(cluster_file, sep='\t')
    assignments.columns = ["cluster", "regions"]
    cluster_ids = list(set(assignments["cluster"].tolist()))

    timeseries = pd.read_table(timeseries_file, sep='\t', index_col=0)
    timeseries["regions"] = timeseries.index

    # Inner merge keeps only regions with a cluster assignment.
    merged = timeseries.merge(assignments, on="regions")
    del merged["regions"]

    means = np.zeros((len(cluster_ids), merged.shape[1] - 1))
    for idx in range(len(cluster_ids)):
        members = pd.DataFrame(merged[merged["cluster"] == idx + 1])
        del members["cluster"]
        means[idx, :] = members.mean(axis=0)

    # z-score each cluster's mean profile across timepoints.
    return zscore(means, axis=1)
def get_cluster_sufficient_stats(cluster_file, timeseries_file):
"""From cluster file and timeseries file, get
means and covariances of the clusters and return as numpy array
"""
# open up cluster file
clusters_df = pd.read_table(cluster_file, sep='\t')
clusters_df.columns = ["cluster", "regions"]
cluster_nums = list(set(clusters_df["cluster"].tolist()))
# open up timeseries file and zscore
timeseries_df = pd.read_table(timeseries_file, sep='\t', index_col=0)
timeseries_df = timeseries_df.apply(zscore, axis=1, result_type="broadcast") #.to_frame()
timeseries_df["regions"] = timeseries_df.index
# merge the two
# TODO make sure to remove examples that are not among the clusters
merged_data = timeseries_df.merge(clusters_df, on="regions")
del merged_data["regions"]
# set up numpy out matrix
cluster_means = []
cluster_covariances = []
cluster_sizes = []
cluster_names = []
# per cluster, get info
for cluster_idx in range(len(cluster_nums)):
cluster_num = cluster_idx + 1
cluster_data = pd.DataFrame(merged_data[merged_data["cluster"] == cluster_num])
del cluster_data["cluster"]
# mean and covariance
cluster_means.append(
cluster_data.mean(axis=0).as_matrix())
cluster_covariances.append(
cluster_data.cov().as_matrix()) # note pandas already normalizes by N-1
# other useful info
cluster_sizes.append(cluster_data.shape[0])
cluster_names.append(cluster_num)
return cluster_means, cluster_covariances, cluster_sizes, cluster_names
def filter_null_and_small_clusters_old(
        cluster_means,
        cluster_covariances,
        cluster_sizes,
        cluster_names,
        ci=0.999,  # sort of multiple hypothesis correction
        size_cutoff=1000):
    """Remove clusters that fail to reject the null (the zero vector lies
    inside the confidence interval of the cluster's multivariate Gaussian)
    as well as clusters with <= size_cutoff members.

    Mutates and returns the four parallel input lists. (Python 2 module:
    bare print statements below.)
    """
    # NOTE(review): chi2.pdf(1-ci, df) is the density at 1-ci, not a
    # quantile; a CI threshold would normally use chi2.ppf -- confirm
    # intent (the same construct is used throughout this module).
    pdf_cutoff = chi2.pdf(1-ci, cluster_means[0].shape[0])
    indices_to_delete = []
    for cluster_idx in range(len(cluster_means)):
        # Drop small clusters outright.
        if cluster_sizes[cluster_idx] <= size_cutoff:
            indices_to_delete.append(cluster_idx)
            continue
        # Density of the all-zero (flat) profile under this cluster's Gaussian.
        pdf_val = multivariate_normal.pdf(
            np.array([0 for i in range(cluster_means[0].shape[0])]),
            mean=cluster_means[cluster_idx],
            cov=cluster_covariances[cluster_idx],
            allow_singular=True)
        # High density at zero => cluster does not reject the null; drop it.
        if pdf_val > pdf_cutoff:
            indices_to_delete.append(cluster_idx)
            continue
    print cluster_sizes
    print indices_to_delete
    # fix clusters: delete from highest index down so indices stay valid
    for index in sorted(indices_to_delete, reverse=True):
        del cluster_means[index]
        del cluster_covariances[index]
        del cluster_sizes[index]
        del cluster_names[index]
    return cluster_means, cluster_covariances, cluster_sizes, cluster_names
def filter_null_and_small_clusters(
        cluster_file,
        mat_file,
        out_cluster_file,
        ci=0.999,  # sort of multiple hypothesis correction
        size_cutoff=1000):
    """File-based variant: drop clusters that do not reject the null or are
    too small, renumber the survivors 1..K, and write the filtered cluster
    list to out_cluster_file.

    Args:
        cluster_file: TSV (with header) of cluster-id, region-id pairs.
        mat_file: TSV with timepoint data, region ids in column 0.
        out_cluster_file: output TSV path ("cluster", "id" columns).
    """
    # first get sufficient statistics for the clusters
    cluster_means, cluster_covariances, cluster_sizes, cluster_names = get_cluster_sufficient_stats(
        cluster_file, mat_file)

    # NOTE(review): size_cutoff is multiplied by the total region count here,
    # i.e. it is treated as a FRACTION -- but the default of 1000 would then
    # exceed the total and filter everything. Confirm callers pass a fraction.
    size_cutoff = size_cutoff * np.sum(cluster_sizes)

    # NOTE(review): chi2.pdf(1-ci, df) is a density, not a quantile
    # (chi2.ppf); kept as-is for consistency with the rest of this module.
    pdf_cutoff = chi2.pdf(1-ci, cluster_means[0].shape[0])
    indices_to_delete = []
    for cluster_idx in range(len(cluster_means)):
        # remove clusters by cluster size
        if cluster_sizes[cluster_idx] <= size_cutoff:
            indices_to_delete.append(cluster_idx)
            continue
        # Density of the all-zero (flat) profile under this cluster's
        # Gaussian; a high value means the cluster does not reject the null.
        pdf_val = multivariate_normal.pdf(
            np.array([0 for i in range(cluster_means[0].shape[0])]),
            mean=cluster_means[cluster_idx],
            cov=cluster_covariances[cluster_idx],
            allow_singular=True)
        if pdf_val > pdf_cutoff:
            indices_to_delete.append(cluster_idx)
            continue
    print cluster_sizes
    print indices_to_delete
    # Cluster names are 1-based while indices are 0-based.
    cluster_names_to_delete = (np.array(indices_to_delete) + 1).tolist()

    # Keep only assignments to surviving clusters.
    cluster_list = pd.read_table(cluster_file)
    cluster_list = cluster_list[~cluster_list["cluster"].isin(cluster_names_to_delete)]

    # and now renumber the survivors contiguously from 1
    clusters_remaining = cluster_list["cluster"].unique().tolist()
    renumbering = dict(zip(clusters_remaining, range(1, len(clusters_remaining)+1)))
    print renumbering
    cluster_list["cluster"].replace(renumbering, inplace=True)
    cluster_list.columns = ["cluster", "id"]
    cluster_list = cluster_list.sort_values("cluster")

    # save out
    cluster_list.to_csv(out_cluster_file, sep="\t", index=False)

    return None
def get_consistent_soft_clusters_by_region(
        cluster_means,
        cluster_covariances,
        cluster_names,
        timepoint_vectors,
        pdf_cutoff,
        corr_cutoff,
        epsilon):
    """Soft-assign one region's timepoint vectors to clusters, keeping only
    clusters consistent across ALL vectors (e.g. rep1, rep2, pooled).

    A vector matches a cluster when (1) its multivariate-normal density is
    at least pdf_cutoff, and both (2) Pearson and (3) Spearman correlations
    with the cluster mean are significant (p <= corr_cutoff). The pooled
    vector (timepoint_vectors[2], by convention) picks the single best
    "hard" cluster by density; soft clusters within epsilon of that best
    density are returned alongside it.

    Returns:
        (filtered_consistent_cluster_names, hard_cluster_name_or_None)
    """
    def matching_clusters(vector):
        # Indices of clusters for which `vector` passes all three tests.
        matches = set()
        for idx in range(len(cluster_means)):
            # Confidence-interval test via the cluster's Gaussian density.
            density = multivariate_normal.pdf(
                vector,
                mean=cluster_means[idx],
                cov=cluster_covariances[idx],
                allow_singular=True)  # scaling can make covariances singular
            if density < pdf_cutoff:
                continue
            # Pearson: relative differences must correlate significantly.
            _, pearson_pval = pearsonr(vector, cluster_means[idx])
            if pearson_pval > corr_cutoff:
                continue
            # Spearman: rank order of timepoints must agree significantly.
            _, spearman_pval = spearmanr(vector, cluster_means[idx])
            if spearman_pval > corr_cutoff:
                continue
            matches.add(idx)
        return matches

    # A cluster is "consistent" only if every vector matches it.
    per_vector_sets = [matching_clusters(vector) for vector in timepoint_vectors]
    consistent_cluster_indices = list(set.intersection(*per_vector_sets))

    def pooled_density(idx):
        # Density of the pooled vector (index 2 by convention) under cluster idx.
        return multivariate_normal.pdf(
            timepoint_vectors[2],
            mean=cluster_means[idx],
            cov=cluster_covariances[idx],
            allow_singular=True)

    # Hard assignment: consistent cluster with the highest pooled density.
    best_pdf_val = 0
    hard_cluster = None
    for idx in consistent_cluster_indices:
        density = pooled_density(idx)
        if density > best_pdf_val:
            best_pdf_val = density
            hard_cluster = cluster_names[idx]

    # Keep only soft clusters whose density is within epsilon of the best,
    # i.e. the region sits close to each of those clusters.
    filtered_consistent_clusters = []
    for idx in consistent_cluster_indices:
        if (best_pdf_val - pooled_density(idx)) < (epsilon * best_pdf_val):
            filtered_consistent_clusters.append(cluster_names[idx])

    return filtered_consistent_clusters, hard_cluster
def get_consistent_soft_clusters_old(
        rep1_timeseries_file,
        rep2_timeseries_file,
        pooled_timeseries_file,
        out_dir,
        prefix,
        cluster_means,
        cluster_covariances,
        cluster_names,
        ci=0.95,
        corr_cutoff=0.05,
        epsilon=0.10):
    """Stream regions from rep1/rep2/pooled gzipped matrices in lockstep and
    append each region to per-cluster soft/hard output files under out_dir.

    Older variant kept for reference; requires pre-created out_dir/soft and
    out_dir/hard directories. (Python 2 module: bare print below.)
    """
    # get pdf val for confidence interval
    # note that the multivariate normal pdf is distributed
    # as chi2(alpha) where alpha is the chosen significance
    # NOTE(review): chi2.pdf is a density, not a quantile (chi2.ppf); kept
    # as-is for consistency with the rest of this module.
    pdf_cutoff = chi2.pdf(1-ci, cluster_means[0].shape[0])
    # open all files to stream regions (the three files must be row-aligned)
    region_idx = 0
    with gzip.open(rep1_timeseries_file, 'r') as rep1:
        with gzip.open(rep2_timeseries_file, 'r') as rep2:
            with gzip.open(pooled_timeseries_file, 'r') as pooled:
                while True:
                    # read lines
                    rep1_line = rep1.readline()
                    rep2_line = rep2.readline()
                    pooled_line = pooled.readline()
                    # break if end of file
                    if rep1_line == "":
                        break
                    # ignore header line (header starts with "d", e.g. "d0")
                    if rep1_line.strip().startswith("d"):
                        continue
                    # separate to fields
                    rep1_fields = rep1_line.strip().split("\t")
                    rep2_fields = rep2_line.strip().split("\t")
                    pooled_fields = pooled_line.strip().split("\t")
                    # split out regions and z-score each timepoint vector
                    rep1_region, rep1_timepoints = rep1_fields[0], zscore(np.array(map(float, rep1_fields[1:])))
                    rep2_region, rep2_timepoints = rep2_fields[0], zscore(np.array(map(float, rep2_fields[1:])))
                    pooled_region, pooled_timepoints = pooled_fields[0], zscore(np.array(map(float, pooled_fields[1:])))
                    # the three files must describe the same region per row
                    assert rep1_region == rep2_region
                    assert rep1_region == pooled_region
                    assert rep2_region == pooled_region
                    # now for each cluster, check consistency and get soft/hard clusters
                    consistent_clusters, hard_cluster = get_consistent_soft_clusters_by_region(
                        cluster_means,
                        cluster_covariances,
                        cluster_names,
                        [rep1_timepoints, rep2_timepoints, pooled_timepoints],
                        pdf_cutoff,
                        corr_cutoff,
                        epsilon)
                    # append region to each consistent (soft) cluster file
                    for cluster in consistent_clusters:
                        cluster_file = "{}/soft/{}.cluster_{}.soft.txt.gz".format(out_dir, prefix, cluster)
                        with gzip.open(cluster_file, 'a') as out:
                            out.write("{}\t{}\n".format(
                                pooled_region,
                                "\t".join(map(str, pooled_timepoints.tolist()))))
                    # save out hard clusters
                    if hard_cluster is not None:
                        hard_cluster_file = "{}/hard/{}.clusters.hard.all.txt.gz".format(out_dir, prefix)
                        with gzip.open(hard_cluster_file, "a") as out:
                            out.write("{}\t{}\t{}\n".format(
                                pooled_region,
                                "\t".join(map(str, pooled_timepoints.tolist())),
                                hard_cluster))
                        # Write out to individual hard cluster file
                        cluster_file = "{}/hard/{}.cluster_{}.hard.txt.gz".format(out_dir, prefix, hard_cluster)
                        with gzip.open(cluster_file, 'a') as out:
                            out.write("{}\t{}\n".format(
                                pooled_region,
                                "\t".join(map(str, pooled_timepoints.tolist()))))
                    region_idx += 1
                    # progress indicator every 1000 regions
                    if region_idx % 1000 == 0:
                        print region_idx
    return
def get_reproducible_clusters(
        clusters_file,
        pooled_mat_file,
        rep1_timeseries_file,
        rep2_timeseries_file,
        out_soft_clusters_file,
        out_hard_clusters_file,
        out_dir,
        prefix,
        ci=0.95,
        corr_cutoff=0.05,
        epsilon=0.10):
    """Stream regions from rep1/rep2/pooled matrices, soft-cluster each one
    and keep only assignments consistent across all three; append results to
    out_soft_clusters_file / out_hard_clusters_file ("cluster\\tid" rows).

    (Python 2 module: bare print below; files are opened in append mode, so
    re-running without removing outputs duplicates rows.)
    """
    run_shell_cmd("mkdir -p {0}/soft {0}/hard".format(out_dir))

    # Per-cluster Gaussian sufficient statistics from the pooled data.
    cluster_means, cluster_covariances, cluster_sizes, cluster_names = get_cluster_sufficient_stats(
        clusters_file, pooled_mat_file)

    # get pdf val for confidence interval
    # note that the multivariate normal pdf is distributed
    # as chi2(alpha) where alpha is the chosen significance
    # NOTE(review): chi2.pdf is a density, not a quantile (chi2.ppf); kept
    # as-is for consistency with the rest of this module.
    pdf_cutoff = chi2.pdf(1-ci, cluster_means[0].shape[0])

    with open(out_hard_clusters_file, "a") as out:
        out.write("cluster\tid\n")
    with open(out_soft_clusters_file, "a") as out:
        out.write("cluster\tid\n")

    # open all files to stream regions (the three files must be row-aligned)
    region_idx = 0
    with gzip.open(rep1_timeseries_file, 'r') as rep1:
        with gzip.open(rep2_timeseries_file, 'r') as rep2:
            with gzip.open(pooled_mat_file, 'r') as pooled:
                while True:
                    # read lines
                    rep1_line = rep1.readline()
                    rep2_line = rep2.readline()
                    pooled_line = pooled.readline()
                    # break if end of file
                    if rep1_line == "":
                        break
                    # ignore header line (header starts with "d", e.g. "d0")
                    if rep1_line.strip().startswith("d"):
                        continue
                    # separate to fields
                    rep1_fields = rep1_line.strip().split("\t")
                    rep2_fields = rep2_line.strip().split("\t")
                    pooled_fields = pooled_line.strip().split("\t")
                    # split out regions and z-score each timepoint vector
                    rep1_region, rep1_timepoints = rep1_fields[0], zscore(
                        np.array(map(float, rep1_fields[1:])))
                    rep2_region, rep2_timepoints = rep2_fields[0], zscore(
                        np.array(map(float, rep2_fields[1:])))
                    pooled_region, pooled_timepoints = pooled_fields[0], zscore(
                        np.array(map(float, pooled_fields[1:])))
                    # the three files must describe the same region per row
                    assert rep1_region == rep2_region
                    assert rep1_region == pooled_region
                    assert rep2_region == pooled_region
                    # now for each cluster, check consistency and get soft/hard clusters
                    # TODO: rename this function - assign_region_to_cluster
                    consistent_clusters, hard_cluster = get_consistent_soft_clusters_by_region(
                        cluster_means,
                        cluster_covariances,
                        cluster_names,
                        [rep1_timepoints, rep2_timepoints, pooled_timepoints],
                        pdf_cutoff,
                        corr_cutoff,
                        epsilon)
                    # TODO: save out to cluster files instead of splitting
                    if len(consistent_clusters) > 0:
                        with open(out_soft_clusters_file, "a") as out:
                            for cluster in consistent_clusters:
                                out.write("{}\t{}\n".format(cluster, pooled_region))
                    if hard_cluster is not None:
                        with open(out_hard_clusters_file, "a") as out:
                            out.write("{}\t{}\n".format(hard_cluster, pooled_region))
                    region_idx += 1
                    # progress indicator every 1000 regions
                    if region_idx % 1000 == 0:
                        print region_idx
    return None
def get_corr_mat(mat_a, mat_b):
    """Pairwise Spearman correlations between the rows of two matrices.

    Returns an (a_rows, b_rows) array whose [i, j] entry is the Spearman
    correlation of mat_a row i with mat_b row j.
    """
    correlations = np.zeros((mat_a.shape[0], mat_b.shape[0]))
    for i, row_a in enumerate(mat_a):
        for j, row_b in enumerate(mat_b):
            # spearmanr returns (rho, pval); only the coefficient is kept.
            correlations[i, j] = spearmanr(row_a, row_b)[0]
    return correlations
def run_dpgp(mat_file, prefix, out_dir, tmp_dir, subsample=False, subsample_num=5000):
    """Run DP_GP_cluster.py on a (possibly gzipped) matrix file, managing
    temporary uncompressed/subsampled copies in tmp_dir.

    Returns the path of the expected "<prefix>_optimal_clustering.txt"
    output file (not checked for existence here).
    """
    run_shell_cmd("mkdir -p {}".format(out_dir))
    run_shell_cmd("mkdir -p {}".format(tmp_dir))

    # DP_GP_cluster.py needs plain text, so always materialize an
    # uncompressed copy in tmp_dir (even when the input is not gzipped).
    input_mat_file = "{}/{}.dpgp.tmp".format(
        tmp_dir, os.path.basename(mat_file).split(".txt")[0])
    if mat_file.endswith(".gz"):
        unzip_mat = "zcat {0} > {1}".format(
            mat_file, input_mat_file)
        run_shell_cmd(unzip_mat)
    else:
        unzip_mat = "cat {0} > {1}".format(
            mat_file, input_mat_file)
        run_shell_cmd(unzip_mat)

    # if subsample: keep the header row and draw subsample_num data rows.
    if subsample:
        subsampled_file = "{}.subsampled.tmp".format(input_mat_file.split(".tmp")[0])
        # keep header
        echo_header = "cat {0} | awk 'NR < 2' > {1}".format(
            input_mat_file, subsampled_file)
        os.system(echo_header)
        # subsample; --random-source seeded with the input file itself makes
        # the draw deterministic for a given input.
        run_subsample = (
            "cat {0} | "
            "awk 'NR > 1' | "
            "shuf -n {1} --random-source={0} >> {2}").format(
                input_mat_file, subsample_num, subsampled_file)
        # Run through bash explicitly so the full pipeline is honored.
        os.system('GREPDB="{}"; /bin/bash -c "$GREPDB"'.format(run_subsample))
        #os.system(run_subsample)
        os.system("rm {}".format(input_mat_file))
        input_mat_file = subsampled_file

    # TODO change header if need be?
    cluster = (
        "DP_GP_cluster.py -i {} -o {} --fast -p pdf --plot").format(
            input_mat_file,
            "{0}/{1}".format(out_dir, prefix))
    run_shell_cmd(cluster)

    # delete the unzipped tmp files <- don't need to since subsampled and small
    #run_shell_cmd("rm {}/*.dpgp.subsampled.tmp".format(tmp_dir))

    # outfile name produced by DP_GP_cluster.py
    optimal_clusters = "{}/{}_optimal_clustering.txt".format(out_dir, prefix)

    return optimal_clusters
# TODO this should be a workflow, not an analysis
def get_consistent_dpgp_trajectories(
        matrices,
        out_dir,
        prefix,
        raw_cluster_min_size=1000,
        raw_cluster_reject_null_ci_interval=0.999,
        rep_to_cluster_ci_interval=0.95,
        rep_to_cluster_corr_cutoff=0.05,
        epsilon=0.10):
    """Given count matrices for replicates, gets
    consistent trajectories

    Do this by calling trajectories using the pooled data
    (to increase "read depth") this leads to more fine grained
    trajectories. Then, for each region, get the max similarity
    to a trajectory (mean val). If consistent, keep. Else,
    throw away.

    Args:
        matrices: [rep1, rep2, pooled] gzipped timepoint matrices, in order
        out_dir: output directory
        prefix: output file prefix
        raw_cluster_min_size: drop pooled clusters smaller than this
        raw_cluster_reject_null_ci_interval: CI for rejecting the null on
            raw pooled clusters
        rep_to_cluster_ci_interval: CI for replicate-to-cluster assignment
        rep_to_cluster_corr_cutoff: correlation cutoff for assignment
        epsilon: tolerance used in soft cluster assignment

    Returns:
        None; results written under {out_dir}/pooled, /soft, /hard
    """
    run_shell_cmd("mkdir -p {}".format(out_dir))
    # inputs must be gzipped and ordered rep1, rep2, pooled
    rep1_timeseries_file = matrices[0]
    rep2_timeseries_file = matrices[1]
    pooled_timeseries_file = matrices[2]
    assert ".gz" in rep1_timeseries_file
    assert ".gz" in rep2_timeseries_file
    assert ".gz" in pooled_timeseries_file
    # stage 1: run DP GP on pooled data (skipped when pooled dir already exists)
    pooled_dir = "{}/pooled".format(out_dir)
    pooled_cluster_file = "{0}/{1}.pooled_optimal_clustering.txt".format(
        pooled_dir, prefix)
    if not os.path.isdir(pooled_dir):
        # unzip files as needed
        pooled_unzipped_file = "{}/{}.tmp".format(
            out_dir,
            os.path.basename(pooled_timeseries_file).split(".mat")[0])
        unzip_mat = ("zcat {0} > {1}").format(
            pooled_timeseries_file, pooled_unzipped_file)
        run_shell_cmd(unzip_mat)
        # TODO(dk) convert header as needed?
        # cluster
        run_shell_cmd("mkdir -p {}".format(pooled_dir))
        cluster = (
            "DP_GP_cluster.py -i {} -o {} -p pdf --plot --fast").format(
                pooled_unzipped_file,
                "{0}/{1}.pooled".format(
                    pooled_dir, prefix))
        run_shell_cmd(cluster)
        # delete the unzipped tmp files
        run_shell_cmd("rm {}/*.tmp".format(out_dir))
    # stage 2: soft clustering (skipped when soft dir already exists)
    # TODO(dk) separate this out from DPGP consistency?
    soft_cluster_dir = "{}/soft".format(out_dir)
    hard_cluster_dir = "{}/hard".format(out_dir)
    if not os.path.isdir(soft_cluster_dir):
        run_shell_cmd("mkdir -p {}".format(soft_cluster_dir))
        run_shell_cmd("mkdir -p {}".format(hard_cluster_dir))
        # for each cluster (from POOLED clusters), get the mean and covariance matrix (assuming multivariate normal)
        cluster_means, cluster_covariances, cluster_sizes, cluster_names = get_cluster_sufficient_stats(
            pooled_cluster_file, pooled_timeseries_file)
        # filter clusters: remove low membership (<1k) clusters and those that don't reject the null
        # note that "low membership" was empirical - strong split between <500 and >1400.
        # rejecting null - confidence interval of .999 (this is stronger as an FDR correction)
        # TODO(dk) pull out params here
        cluster_means, cluster_covariances, cluster_sizes, cluster_names = filter_null_and_small_clusters(
            cluster_means,
            cluster_covariances,
            cluster_sizes,
            cluster_names,
            ci=raw_cluster_reject_null_ci_interval,
            size_cutoff=raw_cluster_min_size)
        # now extract consistent soft clusters
        # ie, for each region, check each cluster to see if it belongs after checking
        # confidence interval (multivariate normal), pearson and spearmans.
        # TODO(dk) pull out params here
        get_consistent_soft_clusters(
            rep1_timeseries_file,
            rep2_timeseries_file,
            pooled_timeseries_file,
            out_dir,
            prefix,
            cluster_means,
            cluster_covariances,
            cluster_names,
            ci=rep_to_cluster_ci_interval,
            corr_cutoff=rep_to_cluster_corr_cutoff,
            epsilon=epsilon)
    # TODO separate this out as a viz analysis
    # sanity check - replot the clusters
    plot_dir = "{}/soft/plot".format(out_dir)
    if not os.path.isdir(plot_dir):
        run_shell_cmd("mkdir -p {}".format(plot_dir))
    # use R to plot
    plot_soft_clusters = ("plot_soft_trajectories.R {0}/soft/plot {0}/soft/{1}").format(
        out_dir, "*soft*.gz")
    print plot_soft_clusters
    run_shell_cmd(plot_soft_clusters)
    plot_dir = "{}/hard/plot".format(out_dir)
    if not os.path.isdir(plot_dir):
        run_shell_cmd("mkdir -p {}".format(plot_dir))
    # use R to plot
    plot_hard_clusters = ("plot_soft_trajectories.R {0}/hard/plot {0}/hard/{1}").format(
        out_dir, "*cluster_*hard*.gz")
    print plot_hard_clusters
    run_shell_cmd(plot_hard_clusters)
    # TODO(dk): return the soft and hard clusters as lists
    return
def split_mat_to_clusters(cluster_file, cluster_mat, out_dir, prefix):
    """helper function to split a mat (given a cluster file)
    into cluster mat files and also lists

    NOTE(review): this is an unfinished stub - it loads the inputs but
    does not yet split or write anything. TODO implement the split.
    """
    # use pandas; header=0 reads the first row as column names
    # (the previous header=True is not a valid pandas header argument)
    clusters = pd.read_table(cluster_file, header=0)
    data = pd.read_table(cluster_mat, header=0)
    # leftover ipdb.set_trace() debugger breakpoint removed
    return None
def plot_clusters(
        cluster_file,
        cluster_subsample_file,
        cluster_mat,
        out_dir,
        prefix,
        plot_individual=True):
    """plots clusters given in the cluster file using the
    data in cluster mat

    Delegates to two R scripts: a heatmap over all clusters and,
    optionally, one plot per individual cluster.

    Args:
        cluster_file: full cluster assignment file
        cluster_subsample_file: subsampled assignments used for the heatmap
        cluster_mat: timepoint data matrix
        out_dir: existing output directory
        prefix: output file prefix
        plot_individual: also plot each cluster separately
    """
    # assertions
    assert os.path.isdir(out_dir)
    # choose a plot title from the assay type encoded in the file name
    if "atac" in cluster_file:
        title = "'Accessibility (ATAC)'"
    else:
        title = "'Expression (RNA-seq)'"
    # heatmap plot
    r_plot_heatmap = (
        "viz.plot_timeseries_heatmap.R "
        "{0} {1} {2} {3} {4}").format(
            cluster_subsample_file, cluster_mat, out_dir, prefix, title)
    print r_plot_heatmap
    run_shell_cmd(r_plot_heatmap)
    # individual clusters
    if plot_individual:
        r_plot_clusters = (
            "viz.plot_timeseries_clusters.R "
            "{0} {1} {2} {3}").format(
                cluster_file, cluster_mat, out_dir, prefix)
        run_shell_cmd(r_plot_clusters)
    return None
def flip_positions(position_list, start, middle, stop):
    """for reorder clusters below: flip from start:stop around middle.

    Swaps the segments position_list[start:middle] and
    position_list[middle:stop] in place and returns the list.
    """
    # tmp is relative to `start`, so indices into it must be offset by
    # `start`; the previous absolute indexing (tmp[middle:stop]) was only
    # correct when start == 0 and mis-sized the slices otherwise.
    tmp = position_list[start:stop]
    offset = middle - start
    position_list[start:stop] = list(tmp[offset:]) + list(tmp[:offset])
    return position_list
def get_ordered_tree_nodes(tree):
    """Collect the leaf ids of a cluster tree from left to right.

    Matches the ordering convention of scipy's leaves_list: a node with
    count == 1 is a leaf and contributes its id; otherwise recurse.
    """
    ordered = []
    for subtree in (tree.left, tree.right):
        if subtree.count == 1:
            # leaf: record its id directly
            ordered.append(subtree.id)
        else:
            # internal node: descend recursively
            ordered.extend(get_ordered_tree_nodes(subtree))
    return ordered
def reorder_tree(tree, cluster_means):
    """Recursive tool to reorder tree

    Reorders the children of every internal node so that subtrees whose
    summed cluster means peak earlier in time come first (left).

    Args:
        tree: scipy ClusterNode (from to_tree); leaves have count == 1
        cluster_means: array of per-cluster mean trajectories, indexed by
            leaf id (rows = clusters, columns = timepoints)

    Returns:
        the same tree object, with left/right children possibly swapped
    """
    # go left
    if tree.left.count == 1:
        tree.left = tree.left
        left_nodes = [tree.left.id]
    else:
        # adjust the tree
        tree.left = reorder_tree(tree.left, cluster_means)
        left_nodes = get_ordered_tree_nodes(tree.left)
    # go right
    if tree.right.count == 1:
        tree.right = tree.right
        right_nodes = [tree.right.id]
    else:
        # adjust the tree
        tree.right = reorder_tree(tree.right, cluster_means)
        right_nodes = get_ordered_tree_nodes(tree.right)
    # calculate average cluster means for each set (summed across clusters)
    left_cluster_mean = np.sum(cluster_means[left_nodes,:], axis=0)
    right_cluster_mean = np.sum(cluster_means[right_nodes,:], axis=0)
    # extract the max (timepoint at which each side peaks)
    left_max_idx = np.argmax(left_cluster_mean)
    right_max_idx = np.argmax(right_cluster_mean)
    # if max is at the edges, calculate slope to nearest 0
    flip = False
    if left_max_idx != right_max_idx:
        # good to go, carry on: later-peaking side should go right
        if left_max_idx > right_max_idx:
            flip = True
    else:
        # tie-break when both sides peak at the same timepoint
        # if left edge: compare decay slopes over the first few timepoints
        # NOTE(review): index 3 hard-codes an assumption of >= 4 timepoints
        # — TODO confirm against callers
        if left_max_idx == 0:
            left_slope = left_cluster_mean[0] - left_cluster_mean[3]
            right_slope = right_cluster_mean[0] - right_cluster_mean[3]
            # NOTE(review): flip is already False here, so this branch is a
            # no-op; possibly `flip = True` was intended for the inverse case
            if left_slope < right_slope:
                flip = False
        # if right:
        elif left_max_idx == cluster_means.shape[1] - 1:
            left_slope = left_cluster_mean[-1] - left_cluster_mean[-3]
            right_slope = right_cluster_mean[-1] - right_cluster_mean[-3]
            if left_slope > right_slope:
                flip = True
        # if middle: compare which neighbor of the shared peak is larger
        else:
            left_side_max_idx = np.argmax(left_cluster_mean[[left_max_idx-1, left_max_idx+1]])
            right_side_max_idx = np.argmax(right_cluster_mean[[right_max_idx-1, right_max_idx+1]])
            if left_side_max_idx > right_side_max_idx:
                flip = True
    #import ipdb
    #ipdb.set_trace()
    # reorder accordingly
    if flip == True:
        right_tmp = tree.right
        left_tmp = tree.left
        tree.right = left_tmp
        tree.left = right_tmp
    return tree
def reorder_clusters(cluster_file, cluster_mat, out_cluster_file):
    """Sort clusters by hclust similarity (hierarchical clustering in scipy)

    Hierarchically clusters the cluster mean trajectories, recursively
    reorders the dendrogram by peak timepoint (reorder_tree), and writes
    the renumbered, sorted cluster assignments to out_cluster_file.
    """
    from scipy.cluster.hierarchy import linkage, leaves_list, fcluster, to_tree
    #from scipy.spatial.distance import pdist, squareform
    from scipy.stats import zscore
    # first extract cluster means
    cluster_means, cluster_covariances, cluster_sizes, cluster_names = get_cluster_sufficient_stats(
        cluster_file, cluster_mat)
    # z-score across clusters so linkage is on relative trajectory shape
    means_z = zscore(np.array(cluster_means), axis=0)
    #cluster_dist = pdist(np.array(cluster_means), "euclidean")
    hclust = linkage(means_z, method="ward")
    # this is all the reordering code below
    # NOTE(review): the `if False:` branch is dead code kept from an earlier
    # leaves_list/fcluster approach; only the else branch runs
    if False:
        # using leaves_list and fcluster, determine split and reverse the FIRST half
        top_cut = fcluster(hclust, 2, criterion="maxclust")
        ordered_leaves = leaves_list(hclust)
        for i in xrange(ordered_leaves.shape[0]):
            current_leaf = ordered_leaves[i]
            if top_cut[current_leaf] == 2:
                # found the leaf
                split_point = i
                break
        # take left side of dendrogram and reverse
        # a recursive reordering of leaves by weight?
        ordered_leaves[0:split_point] = np.flip(ordered_leaves[0:split_point], axis=0)
        #ordered_leaves[split_point:] = np.flip(ordered_leaves[split_point:], axis=0)
        print ordered_leaves + 1
    else:
        # try a recursive reordering
        hclust_tree = to_tree(hclust)
        old_ordered_leaves = leaves_list(hclust)
        reordered_tree = reorder_tree(hclust_tree, np.array(cluster_means))
        ordered_leaves = np.array(get_ordered_tree_nodes(reordered_tree))
        print old_ordered_leaves
        print ordered_leaves
    # build renumbering dict: old (1-based) cluster id -> new position
    renumbering = dict(zip((ordered_leaves+1).tolist(), range(1, len(ordered_leaves)+1)))
    print renumbering
    # read in cluster file
    cluster_list = pd.read_table(cluster_file)
    # renumber and sort
    cluster_list["cluster"].replace(renumbering, inplace=True)
    cluster_list = cluster_list.sort_values("cluster")
    cluster_list.to_csv(out_cluster_file, sep="\t", index=False)
    return None
def split_clusters(cluster_file):
    """Write one gzipped id-list file per cluster found in cluster_file.

    Output files are named {prefix}.cluster_{name}.txt.gz, where prefix
    is everything before ".clustering" in cluster_file; each file holds
    the ids of that cluster, one per line, no header.
    """
    assignments = pd.read_table(cluster_file)
    assignments.columns = ["cluster", "id"]
    out_prefix = cluster_file.split(".clustering")[0]
    for cluster_name in assignments["cluster"].unique().tolist():
        # select this cluster's members
        members = assignments.loc[assignments["cluster"] == cluster_name]
        cluster_out_file = "{}.cluster_{}.txt.gz".format(
            out_prefix, cluster_name)
        # write ids only, gzipped, no header/index
        members.to_csv(
            cluster_out_file,
            columns=["id"],
            compression="gzip",
            sep="\t",
            header=False,
            index=False)
    return None
| vervacity/ggr-project | ggr/analyses/timeseries.py | Python | mit | 34,599 | [
"Gaussian"
] | ab53f2400382e9d7190ac925e83cd4302f8c31d0888e3ffd336c26ea2f68b1a6 |
# This file is part of Merlin/Arthur.
# Merlin/Arthur is the Copyright (C)2009,2010 of Elliot Rosemarine.
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from django.conf.urls import include, url
from Arthur.views import home
from Arthur.views import lookup
from Arthur.views import dashboard
from Arthur.views import members
from Arthur.views import planet
from Arthur.views import galaxy
from Arthur.views import alliance
from Arthur.views import search
from Arthur.views import exiles
from Arthur.views import attack
from Arthur.views import scans
from Arthur.views import graphs
# Mount every Arthur view module at the site root; each included module's
# own urlpatterns decide which paths it actually matches.
_view_modules = (
    home,
    lookup,
    dashboard,
    members,
    planet,
    galaxy,
    alliance,
    search,
    exiles,
    attack,
    scans,
    graphs,
)
urlpatterns = [url(r'', include(module)) for module in _view_modules]
| d7415/merlin | Arthur/views/__init__.py | Python | gpl-2.0 | 1,785 | [
"Galaxy"
] | c0ce8c033357a61ae8ac2474c048d1749ea212c49145b43128be4de6e58e8dbe |
#!/usr/bin/env python3
import itertools
from collections import defaultdict
import logging
from operator import mul
import networkx as nx
import numpy as np
import pandas as pd
from pgmpy.base import DirectedGraph
from pgmpy.factors.discrete import TabularCPD, JointProbabilityDistribution, DiscreteFactor
from pgmpy.factors.continuous import ContinuousFactor
from pgmpy.independencies import Independencies
from pgmpy.extern import six
from pgmpy.extern.six.moves import range, reduce
from pgmpy.models.MarkovModel import MarkovModel
class BayesianModel(DirectedGraph):
"""
Base class for bayesian model.
A models stores nodes and edges with conditional probability
distribution (cpd) and other attributes.
models hold directed edges. Self loops are not allowed neither
multiple (parallel) edges.
Nodes can be any hashable python object.
Edges are represented as links between nodes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object.
Examples
--------
Create an empty bayesian model with no nodes and no edges.
>>> from pgmpy.models import BayesianModel
>>> G = BayesianModel()
G can be grown in several ways.
**Nodes:**
Add one node at a time:
>>> G.add_node('a')
Add the nodes from any container (a list, set or tuple or the nodes
from another graph).
>>> G.add_nodes_from(['a', 'b'])
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge('a', 'b')
a list of edges,
>>> G.add_edges_from([('a', 'b'), ('b', 'c')])
If some edges connect nodes not yet in the model, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Shortcuts:**
Many common graph features allow python syntax for speed reporting.
>>> 'a' in G # check if node in graph
True
>>> len(G) # number of nodes in graph
3
"""
def __init__(self, ebunch=None):
    """Create a model, optionally pre-populated with directed edges.

    Parameters
    ----------
    ebunch : iterable of (u, v) tuples, optional
        Edges to add at construction time.
    """
    super(BayesianModel, self).__init__()
    if ebunch:
        self.add_edges_from(ebunch)
    # CPDs are attached later via add_cpds; cardinalities cached per variable
    self.cpds = []
    self.cardinalities = defaultdict(int)
def add_edge(self, u, v, **kwargs):
    """Add the directed edge u -> v, creating missing nodes automatically.

    Raises
    ------
    ValueError
        If u == v (self loop) or if the edge would create a directed cycle.
    """
    if u == v:
        raise ValueError('Self loops are not allowed.')
    # an existing v -> u path means adding u -> v closes a directed cycle
    both_present = u in self.nodes() and v in self.nodes()
    if both_present and nx.has_path(self, v, u):
        raise ValueError(
            'Loops are not allowed. Adding the edge from (%s->%s) forms a loop.' % (u, v))
    super(BayesianModel, self).add_edge(u, v, **kwargs)
def remove_node(self, node):
    """Remove `node`, its edges and its CPD from the model.

    Each child of `node` that has a CPD gets `node` marginalized out of
    that CPD, keeping the remaining CPDs consistent with the reduced
    graph.
    """
    children = [child for parent, child in self.edges() if parent == node]
    for child in children:
        child_cpd = self.get_cpds(node=child)
        if child_cpd:
            # drop the removed parent from the child's distribution
            child_cpd.marginalize([node], inplace=True)
    if self.get_cpds(node=node):
        self.remove_cpds(node)
    super(BayesianModel, self).remove_node(node)
def remove_nodes_from(self, nodes):
    """Remove every node in `nodes` from the model.

    Each removal also drops the node's edges and CPD and marginalizes
    the CPDs of its children (see remove_node).
    """
    for target in nodes:
        self.remove_node(target)
def add_cpds(self, *cpds):
    """Attach CPDs to the model, replacing any CPD already defined on
    the same variable.

    Parameters
    ----------
    *cpds : TabularCPD or ContinuousFactor
        Each CPD's scope must be a subset of the model's nodes.
    """
    for cpd in cpds:
        if not isinstance(cpd, (TabularCPD, ContinuousFactor)):
            raise ValueError('Only TabularCPD or ContinuousFactor can be added.')
        # every variable in the CPD's scope must be a node of the model
        if not set(cpd.scope()).issubset(set(self.nodes())):
            raise ValueError('CPD defined on variable not in the model', cpd)
        existing_index = None
        for index, prev_cpd in enumerate(self.cpds):
            if prev_cpd.variable == cpd.variable:
                existing_index = index
                break
        if existing_index is None:
            self.cpds.append(cpd)
        else:
            logging.warning("Replacing existing CPD for {var}".format(var=cpd.variable))
            self.cpds[existing_index] = cpd
def get_cpds(self, node=None):
    """Return the CPD of `node`, or all CPDs when node is None.

    Returns None if `node` is in the graph but has no CPD attached;
    raises ValueError when `node` is not a graph node.
    """
    if node is None:
        return self.cpds
    if node not in self.nodes():
        raise ValueError('Node not present in the Directed Graph')
    for cpd in self.cpds:
        if cpd.variable == node:
            return cpd
    return None
def remove_cpds(self, *cpds):
    """Detach the given CPDs from the model.

    Parameters
    ----------
    *cpds : TabularCPD objects or variable-name strings
        A string is resolved to that variable's CPD first.
    """
    for cpd in cpds:
        target = self.get_cpds(cpd) if isinstance(cpd, six.string_types) else cpd
        self.cpds.remove(target)
def get_cardinality(self, node=None):
    """
    Returns the cardinality of the node. Throws an error if the CPD for the
    queried node hasn't been added to the network.

    Parameters
    ----------
    node: Any hashable python object(optional).
        The node whose cardinality we want. If node is not specified returns a
        dictionary with the given variable as keys and their respective cardinality
        as values.

    Returns
    -------
    int or dict : If node is specified returns the cardinality of the node.
        If node is not specified returns a dictionary with the given
        variable as keys and their respective cardinality as values.

    Examples
    --------
    >>> student.get_cardinality()
    defaultdict(int, {'diff': 2, 'grade': 2, 'intel': 2})
    >>> student.get_cardinality('intel')
    2
    """
    # Compare against None explicitly: a node may be a falsy-but-valid
    # hashable (0, "", etc.); the previous `if node:` wrongly fell through
    # to the all-variables branch for such names.
    if node is not None:
        return self.get_cpds(node).cardinality[0]
    cardinalities = defaultdict(int)
    for cpd in self.cpds:
        cardinalities[cpd.variable] = cpd.cardinality[0]
    return cardinalities
def check_model(self):
    """Validate the model.

    Checks that every node has a CPD, that each CPD's evidence matches
    the node's parents, and that each CPD sums/integrates to 1.

    Returns
    -------
    bool
        True when all checks pass; otherwise a ValueError is raised.
    """
    for node in self.nodes():
        cpd = self.get_cpds(node=node)
        if cpd is None:
            raise ValueError('No CPD associated with {}'.format(node))
        if isinstance(cpd, (TabularCPD, ContinuousFactor)):
            evidence = cpd.get_evidence()
            parents = self.get_parents(node)
            # treat None/empty uniformly when comparing evidence to parents
            if set(evidence if evidence else []) != set(parents if parents else []):
                raise ValueError("CPD associated with {node} doesn't have "
                                 "proper parents associated with it.".format(node=node))
            if not cpd.is_valid_cpd():
                raise ValueError("Sum or integral of conditional probabilites for node {node}"
                                 " is not equal to 1.".format(node=node))
    return True
def _get_ancestors_of(self, obs_nodes_list):
    """Return the set of all ancestors of the given node(s), including
    the nodes themselves.

    Parameters
    ----------
    obs_nodes_list : node or list/tuple of nodes
    """
    if not isinstance(obs_nodes_list, (list, tuple)):
        obs_nodes_list = [obs_nodes_list]
    for node in obs_nodes_list:
        if node not in self.nodes():
            raise ValueError('Node {s} not in not in graph'.format(s=node))
    # breadth-agnostic traversal over predecessors
    ancestors = set()
    frontier = set(obs_nodes_list)
    while frontier:
        current = frontier.pop()
        if current not in ancestors:
            ancestors.add(current)
            frontier.update(self.predecessors(current))
    return ancestors
def active_trail_nodes(self, variables, observed=None):
    """
    Returns a dictionary with the given variables as keys and all the nodes reachable
    from that respective variable as values.

    Parameters
    ----------
    variables: str or array like
        variables whose active trails are to be found.
    observed : List of nodes (optional)
        If given the active trails would be computed assuming these nodes to be observed.

    Examples
    --------
    >>> from pgmpy.models import BayesianModel
    >>> student = BayesianModel()
    >>> student.add_nodes_from(['diff', 'intel', 'grades'])
    >>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades')])
    >>> student.active_trail_nodes('diff')
    {'diff': {'diff', 'grades'}}
    >>> student.active_trail_nodes(['diff', 'intel'], observed='grades')
    {'diff': {'diff', 'intel'}, 'intel': {'diff', 'intel'}}

    References
    ----------
    Details of the algorithm can be found in 'Probabilistic Graphical Model
    Principles and Techniques' - Koller and Friedman
    Page 75 Algorithm 3.1
    """
    if observed:
        observed_list = observed if isinstance(observed, (list, tuple)) else [observed]
    else:
        observed_list = []
    # ancestors of observed nodes are needed to detect active v-structures
    ancestors_list = self._get_ancestors_of(observed_list)
    # Direction of flow of information
    # up -> from parent to child
    # down -> from child to parent
    active_trails = {}
    for start in variables if isinstance(variables, (list, tuple)) else [variables]:
        # (node, direction) pairs still to explore / already explored
        visit_list = set()
        visit_list.add((start, 'up'))
        traversed_list = set()
        active_nodes = set()
        while visit_list:
            node, direction = visit_list.pop()
            if (node, direction) not in traversed_list:
                # any reachable non-observed node lies on an active trail
                if node not in observed_list:
                    active_nodes.add(node)
                traversed_list.add((node, direction))
                if direction == 'up' and node not in observed_list:
                    for parent in self.predecessors(node):
                        visit_list.add((parent, 'up'))
                    for child in self.successors(node):
                        visit_list.add((child, 'down'))
                elif direction == 'down':
                    if node not in observed_list:
                        for child in self.successors(node):
                            visit_list.add((child, 'down'))
                    # v-structure: a node with an observed descendant lets
                    # information flow back up through its parents
                    if node in ancestors_list:
                        for parent in self.predecessors(node):
                            visit_list.add((parent, 'up'))
        active_trails[start] = active_nodes
    return active_trails
def local_independencies(self, variables):
    """Return the local independencies of each given variable:
    (X _|_ non-descendents(X) - parents(X) | parents(X)).

    Parameters
    ----------
    variables : node or list/tuple of nodes
    """
    def _descendents(start):
        # Plain DFS over successors; the graph is acyclic, so no
        # visited set is needed for termination (matches the original).
        found = []
        stack = [start]
        while stack:
            current = stack.pop()
            children = list(self.neighbors(current))
            stack.extend(children)
            found.extend(children)
        return found

    independencies = Independencies()
    variable_list = variables if isinstance(variables, (list, tuple)) else [variables]
    for variable in variable_list:
        non_descendents = set(self.nodes()) - {variable} - set(_descendents(variable))
        parents = set(self.get_parents(variable))
        if non_descendents - parents:
            independencies.add_assertions([variable, non_descendents - parents, parents])
    return independencies
def is_active_trail(self, start, end, observed=None):
    """Return True when an active trail connects `start` and `end`,
    given the optionally observed nodes.

    Parameters
    ----------
    start, end : graph nodes
    observed : list of nodes, optional
    """
    reachable = self.active_trail_nodes(start, observed)[start]
    return end in reachable
def get_independencies(self, latex=False):
    """Compute all conditional independencies implied by d-separation.

    Parameters
    ----------
    latex : bool
        When True, return the LaTeX string of the assertions instead of
        the Independencies object.
    """
    independencies = Independencies()
    for start in self.nodes():
        rest = set(self.nodes()) - {start}
        # try every possible observed subset of the remaining nodes
        for observed_size in range(len(rest)):
            for observed in itertools.combinations(rest, observed_size):
                reachable = self.active_trail_nodes(start, observed=observed)[start]
                d_seperated_variables = rest - set(observed) - set(reachable)
                if d_seperated_variables:
                    independencies.add_assertions([start, d_seperated_variables, observed])
    independencies.reduce()
    return independencies.latex_string() if latex else independencies
def to_markov_model(self):
    """Return the MarkovModel equivalent of this model.

    The result is the moral graph of this network, with every CPD
    converted to a factor and attached.
    """
    markov = MarkovModel(self.moralize().edges())
    factors = [cpd.to_factor() for cpd in self.cpds]
    markov.add_factors(*factors)
    return markov
def to_junction_tree(self):
    """Build a junction (clique) tree for this model.

    The model is first moralized into a MarkovModel, whose junction
    tree is then constructed and returned: each tree node is a maximal
    clique and each sepset separates the variables on its two sides.
    """
    return self.to_markov_model().to_junction_tree()
def fit(self, data, estimator=None, state_names=None, complete_samples_only=True, **kwargs):
    """
    Estimates the CPD for each variable based on a given data set.

    Parameters
    ----------
    data: pandas DataFrame object
        DataFrame object with column names identical to the variable names of the network.
        (If some values in the data are missing the data cells should be set to `numpy.NaN`.
        Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.)

    estimator: Estimator class
        One of:
        - MaximumLikelihoodEstimator (default)
        - BayesianEstimator: In this case, pass 'prior_type' and either 'pseudo_counts'
            or 'equivalent_sample_size' as additional keyword arguments.
            See `BayesianEstimator.get_parameters()` for usage.

    state_names: dict (optional)
        A dict indicating, for each variable, the discrete set of states
        that the variable can take. If unspecified, the observed values
        in the data set are taken to be the only possible states.

    complete_samples_only: bool (default `True`)
        Specifies how to deal with missing data, if present. If set to `True` all rows
        that contain `np.Nan` somewhere are ignored. If `False` then, for each variable,
        every row where neither the variable nor its parents are `np.NaN` is used.
    """
    from pgmpy.estimators import MaximumLikelihoodEstimator, BayesianEstimator, BaseEstimator

    # None replaces the previous mutable default argument `[]`; the
    # effective default is unchanged.
    if state_names is None:
        state_names = []
    if estimator is None:
        estimator = MaximumLikelihoodEstimator
    elif not issubclass(estimator, BaseEstimator):
        raise TypeError("Estimator object should be a valid pgmpy estimator.")

    _estimator = estimator(self, data, state_names=state_names,
                           complete_samples_only=complete_samples_only)
    cpds_list = _estimator.get_parameters(**kwargs)
    self.add_cpds(*cpds_list)
def predict(self, data):
    """
    Predicts states of all the missing variables.

    For every row of `data`, runs a MAP query over the variables that are
    present in the model but absent from `data`, using the row as evidence.

    Parameters
    ----------
    data : pandas DataFrame object
        A DataFrame whose columns are a strict subset of the model's
        variables; the missing variables are the ones predicted.

    Returns
    -------
    pandas DataFrame with one column per predicted variable, indexed like
    `data`.
    """
    from pgmpy.inference import VariableElimination

    data_vars = set(data.columns)
    model_vars = set(self.nodes())
    if data_vars == model_vars:
        raise ValueError("No variable missing in data. Nothing to predict")
    elif data_vars - model_vars:
        raise ValueError("Data has variables which are not in the model")
    to_predict = model_vars - data_vars

    # Send the state_names dict from one of the estimated CPDs to the
    # inference class so predicted states carry the fitted labels.
    inference = VariableElimination(
        self, state_names=self.get_cpds()[0].state_names)

    predictions = defaultdict(list)
    for _, data_point in data.iterrows():
        best_states = inference.map_query(variables=to_predict,
                                          evidence=data_point.to_dict())
        for variable, state in best_states.items():
            predictions[variable].append(state)
    return pd.DataFrame(predictions, index=data.index)
def predict_probability(self, data):
    """
    Predicts probabilities of all states of the missing variables.

    For every row of `data`, runs a variable-elimination query over the
    variables that are present in the model but absent from `data`, using
    the row as evidence, and records the probability of each state.

    Parameters
    ----------
    data : pandas DataFrame object
        A DataFrame whose columns are a strict subset of the model's
        variables; probabilities are returned for the missing variables.

    Returns
    -------
    pandas DataFrame with one column per (variable, state) pair, named
    "<variable>_<state>", indexed like `data`.
    """
    from pgmpy.inference import VariableElimination

    data_vars = set(data.columns)
    model_vars = set(self.nodes())
    if data_vars == model_vars:
        raise ValueError("No variable missing in data. Nothing to predict")
    elif data_vars - model_vars:
        raise ValueError("Data has variables which are not in the model")
    to_predict = model_vars - data_vars

    inference = VariableElimination(self)
    probabilities = defaultdict(list)
    for _, data_point in data.iterrows():
        # query() yields one factor per missing variable.
        factors = inference.query(variables=to_predict,
                                  evidence=data_point.to_dict())
        for variable, factor in factors.items():
            state_labels = self.get_cpds(variable).state_names[variable]
            for idx, prob in enumerate(factor.values):
                probabilities[variable + '_' + str(state_labels[idx])].append(prob)
    return pd.DataFrame(probabilities, index=data.index)
def get_factorized_product(self, latex=False):
    # Deliberately unimplemented placeholder (returns None).
    # TODO: refer to IMap class for explanation why this is not implemented.
    pass
def get_immoralities(self):
    """
    Finds all the immoralities in the model.

    A v-structure X -> Z <- Y is an immorality when X and Y are not
    directly connected by an edge in either direction.

    Returns
    -------
    set: A set of all the immoralities in the model, each as a sorted
    2-tuple of parent nodes.

    Examples
    ---------
    >>> from pgmpy.models import BayesianModel
    >>> student = BayesianModel()
    >>> student.add_edges_from([('diff', 'grade'), ('intel', 'grade'),
    ...                         ('intel', 'SAT'), ('grade', 'letter')])
    >>> student.get_immoralities()
    {('diff', 'intel')}
    """
    found = set()
    for child in self.nodes():
        for u, v in itertools.combinations(self.predecessors(child), 2):
            # Parents that are themselves connected do not form an immorality.
            if self.has_edge(u, v) or self.has_edge(v, u):
                continue
            found.add(tuple(sorted((u, v))))
    return found
def is_iequivalent(self, model):
    """
    Checks whether the given model is I-equivalent to this one.

    Two DAGs are I-equivalent when they have the same skeleton
    (undirected structure, compared up to isomorphism) and the same set
    of immoralities.  Immoralities are compared by node name, so node
    names must match for a positive result.

    Parameters
    ----------
    model : A Bayesian model object, for which you want to check I-equivalence

    Returns
    --------
    boolean : True if both are I-equivalent, False otherwise

    Examples
    --------
    >>> from pgmpy.models import BayesianModel
    >>> G = BayesianModel()
    >>> G.add_edges_from([('V', 'W'), ('W', 'X'),
    ...                   ('X', 'Y'), ('Z', 'Y')])
    >>> G1 = BayesianModel()
    >>> G1.add_edges_from([('W', 'V'), ('X', 'W'),
    ...                    ('X', 'Y'), ('Z', 'Y')])
    >>> G.is_iequivalent(G1)
    True
    """
    if not isinstance(model, BayesianModel):
        raise TypeError('model must be an instance of Bayesian Model')
    same_skeleton = nx.algorithms.isomorphism.GraphMatcher(
        self.to_undirected(), model.to_undirected()).is_isomorphic()
    return same_skeleton and self.get_immoralities() == model.get_immoralities()
def is_imap(self, JPD):
    """
    Checks whether this Bayesian model is an I-map of the given
    JointProbabilityDistribution, i.e. whether the product of the
    model's CPDs equals the given joint distribution.

    Parameters
    -----------
    JPD : An instance of JointProbabilityDistribution Class, for which you want to
        check the Imap

    Returns
    --------
    boolean : True if bayesian model is Imap for given Joint Probability Distribution
        False otherwise

    Examples
    --------
    >>> from pgmpy.models import BayesianModel
    >>> from pgmpy.factors.discrete import TabularCPD
    >>> from pgmpy.factors.discrete import JointProbabilityDistribution
    >>> G = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
    >>> diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]])
    >>> intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
    >>> grade_cpd = TabularCPD('grade', 3,
    ...                        [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
    ...                         [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
    ...                         [0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
    ...                        evidence=['diff', 'intel'],
    ...                        evidence_card=[2, 3])
    >>> G.add_cpds(diff_cpd, intel_cpd, grade_cpd)
    >>> val = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032,
    ...        0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128]
    >>> JPD = JointProbabilityDistribution(['diff', 'intel', 'grade'], [2, 3, 3], val)
    >>> G.is_imap(JPD)
    True
    """
    if not isinstance(JPD, JointProbabilityDistribution):
        raise TypeError("JPD must be an instance of JointProbabilityDistribution")
    # Multiply all CPDs (converted to factors) to obtain the joint
    # distribution implied by the model, then compare.
    model_joint = reduce(mul, (cpd.to_factor() for cpd in self.get_cpds()))
    given_joint = DiscreteFactor(JPD.variables, JPD.cardinality, JPD.values)
    return given_joint == model_joint
def copy(self):
    """
    Returns a copy of the model: same nodes and edges, plus copies of any
    attached CPDs.

    Returns
    -------
    BayesianModel: Copy of the model on which the method was called.

    Examples
    --------
    >>> from pgmpy.models import BayesianModel
    >>> from pgmpy.factors.discrete import TabularCPD
    >>> model = BayesianModel([('A', 'B'), ('B', 'C')])
    >>> cpd_a = TabularCPD('A', 2, [[0.2], [0.8]])
    >>> cpd_b = TabularCPD('B', 2, [[0.3, 0.7], [0.7, 0.3]],
    ...                    evidence=['A'], evidence_card=[2])
    >>> cpd_c = TabularCPD('C', 2, [[0.1, 0.9], [0.9, 0.1]],
    ...                    evidence=['B'], evidence_card=[2])
    >>> model.add_cpds(cpd_a, cpd_b, cpd_c)
    >>> copy_model = model.copy()
    >>> copy_model.edges()
    [('A', 'B'), ('B', 'C')]
    """
    duplicate = BayesianModel()
    duplicate.add_nodes_from(self.nodes())
    duplicate.add_edges_from(self.edges())
    if self.cpds:
        duplicate.add_cpds(*(cpd.copy() for cpd in self.cpds))
    return duplicate
def get_markov_blanket(self, node):
    """
    Returns a markov blanket for a random variable. In the case
    of Bayesian Networks, the markov blanket is the set of
    node's parents, its children and its children's other parents.

    Parameters
    ----------
    node: string, int or any hashable python object.
        The node whose markov blanket would be returned.

    Returns
    -------
    list(blanket_nodes): List of nodes contained in Markov Blanket

    Examples
    --------
    >>> from pgmpy.models import BayesianModel
    >>> G = BayesianModel([('x', 'y'), ('z', 'y'), ('y', 'w'), ('y', 'v'), ('u', 'w'),
    ...                    ('s', 'v'), ('w', 't'), ('w', 'm'), ('v', 'n'), ('v', 'q')])
    >>> G.get_markov_blanket('y')
    ['s', 'w', 'x', 'u', 'z', 'v']
    """
    children = self.get_children(node)
    parents = self.get_parents(node)
    blanket_nodes = children + parents
    # Add the children's other parents (co-parents).
    for child_node in children:
        blanket_nodes.extend(self.get_parents(child_node))
    blanket_nodes = set(blanket_nodes)
    # BUG FIX: use discard() instead of remove().  `node` itself only
    # enters the set as "a parent of its children"; for a childless node
    # it is absent and remove() would raise KeyError.
    blanket_nodes.discard(node)
    return list(blanket_nodes)
| khalibartan/pgmpy | pgmpy/models/BayesianModel.py | Python | mit | 39,747 | [
"VisIt"
] | cc6ceed385843667962f6ae53e052c3678ac86fc4d617bfbf58060865d46f7c9 |
"""
.. _tut_forward:
Head model and forward computation
==================================
The aim of this tutorial is to be a getting started for forward
computation.
For more extensive details and presentation of the general
concepts for forward modeling. See :ref:`ch_forward`.
"""
import mne
from mne.datasets import sample
data_path = sample.data_path()
# the raw file containing the channel location + types
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# The paths to freesurfer reconstructions
subjects_dir = data_path + '/subjects'
subject = 'sample'
###############################################################################
# Computing the forward operator
# ------------------------------
#
# To compute a forward operator we need:
#
# - a ``-trans.fif`` file that contains the coregistration info.
# - a source space
# - the BEM surfaces
###############################################################################
# Compute and visualize BEM surfaces
# ----------------------------------
#
# The BEM surfaces are the triangulations of the interfaces between different
# tissues needed for forward computation. These surfaces are for example
# the inner skull surface, the outer skull surface and the outer skill
# surface.
#
# Computing the BEM surfaces requires FreeSurfer and makes use of either of
# the two following command line tools:
#
# - :ref:`gen_mne_watershed_bem`
# - :ref:`gen_mne_flash_bem`
#
# Here we'll assume it's already computed. It takes a few minutes per subject.
#
# For EEG we use 3 layers (inner skull, outer skull, and skin) while for
# MEG 1 layer (inner skull) is enough.
#
# Let's look at these surfaces. The function :func:`mne.viz.plot_bem`
# assumes that you have the the *bem* folder of your subject FreeSurfer
# reconstruction the necessary files.
mne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,
brain_surfaces='white', orientation='coronal')
###############################################################################
# Visualization the coregistration
# --------------------------------
#
# The coregistration is operation that allows to position the head and the
# sensors in a common coordinate system. In the MNE software the transformation
# to align the head and the sensors in stored in a so-called **trans file**.
# It is a FIF file that ends with -trans.fif. It can be obtained with
# mne_analyze (Unix tools), mne.gui.coregistration (in Python) or mrilab
# if you're using a Neuromag system.
#
# For the Python version see func:`mne.gui.coregistration`
#
# Here we assume the coregistration is done, so we just visually check the
# alignment with the following code.
# The transformation file obtained by coregistration
trans = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
info = mne.io.read_info(raw_fname)
mne.viz.plot_trans(info, trans, subject=subject, dig=True,
meg_sensors=True, subjects_dir=subjects_dir)
###############################################################################
# Compute Source Space
# --------------------
#
# The source space defines the position of the candidate source locations.
# The following code compute such a cortical source space with
# an OCT-6 resolution.
#
# See :ref:`setting_up_source_space` for details on source space definition
# and spacing parameter.
src = mne.setup_source_space(subject, spacing='oct6',
subjects_dir=subjects_dir, add_dist=False)
print(src)
###############################################################################
# ``src`` contains two parts, one for the left hemisphere (4098 locations) and
# one for the right hemisphere (4098 locations). Sources can be visualized on
# top of the BEM surfaces.
mne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,
brain_surfaces='white', src=src, orientation='coronal')
###############################################################################
# However, only sources that lie in the plotted MRI slices are shown.
# Let's write a few lines of mayavi to see all sources.
import numpy as np # noqa
from mayavi import mlab # noqa
from surfer import Brain # noqa
brain = Brain('sample', 'lh', 'inflated', subjects_dir=subjects_dir)
surf = brain._geo
vertidx = np.where(src[0]['inuse'])[0]
mlab.points3d(surf.x[vertidx], surf.y[vertidx],
surf.z[vertidx], color=(1, 1, 0), scale_factor=1.5)
###############################################################################
# Compute forward solution
# ------------------------
#
# We can now compute the forward solution.
# To reduce computation we'll just compute a single layer BEM (just inner
# skull) that can then be used for MEG (not EEG).
#
# We specify if we want a one-layer or a three-layer BEM using the
# conductivity parameter.
#
# The BEM solution requires a BEM model which describes the geometry
# of the head the conductivities of the different tissues.
conductivity = (0.3,) # for single layer
# conductivity = (0.3, 0.006, 0.3) # for three layers
model = mne.make_bem_model(subject='sample', ico=4,
conductivity=conductivity,
subjects_dir=subjects_dir)
bem = mne.make_bem_solution(model)
###############################################################################
# Note that the BEM does not involve any use of the trans file. The BEM
# only depends on the head geometry and conductivities.
# It is therefore independent from the MEG data and the head position.
#
# Let's now compute the forward operator, commonly referred to as the
# gain or leadfield matrix.
#
# See :func:`mne.make_forward_solution` for details on parameters meaning.
fwd = mne.make_forward_solution(raw_fname, trans=trans, src=src, bem=bem,
meg=True, eeg=False, mindist=5.0, n_jobs=2)
print(fwd)
###############################################################################
# We can explore the content of fwd to access the numpy array that contains
# the gain matrix.
leadfield = fwd['sol']['data']
print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape)
###############################################################################
# To extract the numpy array containing the forward operator corresponding to
# the source space `fwd['src']` with cortical orientation constraint
# we can use the following:
fwd_fixed = mne.convert_forward_solution(fwd, surf_ori=True,
force_fixed=True)
leadfield = fwd_fixed['sol']['data']
print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape)
###############################################################################
# This is equivalent to the following code that explicitly applies the
# forward operator to a source estimate composed of the identity operator:
n_dipoles = leadfield.shape[1]
vertices = [src_hemi['vertno'] for src_hemi in fwd_fixed['src']]
stc = mne.SourceEstimate(1e-9 * np.eye(n_dipoles), vertices, tmin=0., tstep=1)
leadfield = mne.apply_forward(fwd_fixed, stc, info).data / 1e-9
###############################################################################
# To save to disk a forward solution you can use
# :func:`mne.write_forward_solution` and to read it back from disk
# :func:`mne.read_forward_solution`. Don't forget that FIF files containing
# forward solution should end with *-fwd.fif*.
#
# To get a fixed-orientation forward solution, use
# :func:`mne.convert_forward_solution` to convert the free-orientation
# solution to (surface-oriented) fixed orientation.
###############################################################################
# Exercise
# --------
#
# By looking at
# :ref:`sphx_glr_auto_examples_forward_plot_forward_sensitivity_maps.py`
# plot the sensitivity maps for EEG and compare it with the MEG, can you
# justify the claims that:
#
# - MEG is not sensitive to radial sources
# - EEG is more sensitive to deep sources
#
# How will the MEG sensitivity maps and histograms change if you use a free
# instead if a fixed/surface oriented orientation?
#
# Try this changing the mode parameter in :func:`mne.sensitivity_map`
# accordingly. Why don't we see any dipoles on the gyri?
| jaeilepp/mne-python | tutorials/plot_forward.py | Python | bsd-3-clause | 8,258 | [
"Mayavi"
] | 53d8779e55b547b54e60c8b14c62bbd145c4b55c3ac971bbb4a4f819a95f6c3f |
# -*- coding: utf-8 -*-
#import sys # for devel only
#
#import numpy
#import json
#
from aiida.parsers.parser import Parser
from aiida.parsers.exceptions import OutputParsingError
from aiida.common.datastructures import calc_states
#
from aiida.orm.calculation.job.vasp.vasp import VaspCalculation
from aiida.orm.calculation.job.vasp.vasp import ParserInstructionFactory
from aiida.orm.data.parameter import ParameterData
__copyright__ = u'Copyright © 2016, Mario Zic, Trinity College Dublin. All Rights Reserved.'
__license__ = "Apache, Version 2.0, see LICENSE.txt file"
__version__ = "0.0.1"
__contributors__ = "Mario Zic"
__contact__ = u'zicm_at_tcd.ie'
class VaspParser(Parser):
    """
    This class is the implementation of the Parser class
    for the VASP calculator.

    NOTE: this module uses Python 2 syntax (a `print` statement appears
    in parse_from_calc) and will not run under Python 3 as-is.
    """
    # Link name under which the parsed output structure is stored.
    _outstruct_name = 'output_structure'

    def __init__(self, calculation):
        """
        Initialize the instance of VaspParser

        :param calculation: the VaspCalculation whose retrieved folder
            will be parsed.
        :raises OutputParsingError: if `calculation` is not a
            VaspCalculation instance.
        """
        # check for valid input
        if not isinstance(calculation, VaspCalculation):
            raise OutputParsingError(
                "Input calculation must be a VaspCalculation"
            )
        self._calc = calculation

    def parse_from_calc(self, manual=True, custom_instruct=None):
        """
        Parses the datafolder, stores results.

        :param manual: if False, require the calculation to be in the
            PARSING state before proceeding.
        :param custom_instruct: optional list of instruction dicts
            (each with 'instr', 'type', 'params' keys) that overrides the
            instructions taken from the calculation's 'settings' input.
        :return: (successful, new_nodes_list) where `successful` is a bool
            and `new_nodes_list` is a list of (link_name, node) tuples.
        """
        from aiida.common.exceptions import InvalidOperation
        from aiida.common import aiidalogger
        from aiida.utils.logger import get_dblogger_extra
        parserlogger = aiidalogger.getChild('vaspparser')
        logger_extra = get_dblogger_extra(self._calc)
        # suppose at the start that the job is successful
        successful = True
        parser_warnings = {}  # for logging non-critical events
        # check that calculation is in the right state
        if not manual:
            state = self._calc.get_state()
            if state != calc_states.PARSING:
                raise InvalidOperation(
                    "Calculation not in {} state".format(calc_states.PARSING)
                )
        # get parser instructions
        # TODO: output parser should NOT interpret the input !!!
        try:
            instruct = self._calc.get_inputs_dict().pop(
                self._calc.get_linkname('settings'))
            instruct = instruct.get_dict()
            instruct = instruct[u'PARSER_INSTRUCTIONS']
            ########## Abel Modification to test custom parsers
            if custom_instruct is not None:
                instruct = custom_instruct
            ##########
            # check if structure, data, and error parsers are specified
            # if not append defaults
            itypes = [i['type'] for i in instruct]
            # structure
            if not 'structure' in itypes:
                instruct.append({
                    'instr': 'default_structure_parser',
                    'type': 'structure',
                    'params': {}}
                )
                parser_warnings.setdefault(
                    'Structure parser instruction not found!',
                    'default_structure_parser loaded.'
                )
            # error
            if not 'error' in itypes:
                instruct.append({
                    'instr': 'default_error_parser',
                    'type': 'error',
                    'params': {}}
                )
                parser_warnings.setdefault(
                    'Error parser instruction not found!',
                    'default_error_parser loaded.'
                )
            # output
            if not 'data' in itypes:
                instruct.append({
                    'instr': 'default_vasprun_parser',
                    'type': 'data',
                    'params': {}}
                )
                parser_warnings.setdefault(
                    'Data parser instruction not found!',
                    'default_data_parser_parser loaded.'
                )
        except:
            # NOTE(review): bare except deliberately falls back to the default
            # instruction set on *any* failure above (missing settings input,
            # missing PARSER_INSTRUCTIONS key, malformed entries).
            parser_warnings.setdefault(
                'Parser instructions not found',
                'Default instructions were loaded.'
            )
            # don't crash, load default instructions instead
            instruct = [
                # output
                {
                    'instr': 'default_vasprun_parser',
                    'type': 'data',
                    'params': {}
                },
                # error
                {
                    'instr': 'default_error_parser',
                    'type': 'error',
                    'params': {}
                },
                # structure
                {
                    'instr': 'default_structure_parser',
                    'type': 'structure',
                    'params': {}
                }
            ]
        # select the folder object
        out_folder = self._calc.get_retrieved_node()
        # check what is inside the folder
        list_of_files = out_folder.get_folder_list()
        # === check if mandatory files exist ===
        # default output file should exist
        if not self._calc._default_output in list_of_files:
            successful = False
            parserlogger.error(
                "Standard output file ({}) not found".format(
                    self._calc._default_output
                ),
                extra=logger_extra
            )
            return successful, ()
        # output structure file should exist
        if not self._calc._output_structure in list_of_files:
            successful = False
            parserlogger.error(
                "Output structure file ({}) not found".format(
                    self._calc._output_structure
                ),
                extra=logger_extra
            )
            return successful, ()
        # stderr file should exist
        if not self._calc._SCHED_ERROR_FILE in list_of_files:
            successful = False
            parserlogger.error(
                "STDERR file ({}) not found".format(
                    self._calc._SCHED_ERROR_FILE
                ),
                extra=logger_extra
            )
            return successful, ()
        instr_node_list = []
        errors_node_list = []
        # === execute instructions ===
        # print instruct
        for instr in instruct:
            # create an executable instruction
            try:
                # load instruction
                itype = instr['type'].lower()
                iname = instr['instr']
                iparams = instr['params']
                ifull_name = "{}.{}".format(itype, iname)
                # append parameters
                if itype == 'error':
                    iparams.setdefault(
                        'SCHED_ERROR_FILE', self._calc._SCHED_ERROR_FILE)
                elif itype == 'structure':
                    iparams.setdefault(
                        'OUTPUT_STRUCTURE', self._calc._output_structure)
                # instantiate
                instr = ParserInstructionFactory(ifull_name)
                instr_exe = instr(
                    out_folder,
                    params=iparams if iparams else None
                )
            except ValueError:
                parser_warnings.setdefault(
                    '{}_instruction'.format(instr),
                    'Invalid parser instruction - could not be instantiated!'
                )
                instr_exe = None
            # execute
            if instr_exe:
                try:
                    for item in instr_exe.execute():  # store the results
                        instr_node_list.append(item)
                except Exception as e:
                    # Python 2 print statement.
                    print instr, e
                    # NOTE(review): the second argument below is a *set*
                    # literal of two formatted strings used as the default
                    # value for the 'output' key -- it looks like it was
                    # meant to be a key/value pair (see the original
                    # commented-out line); confirm intent.
                    # parser_warnings['output'].setdefault( Modified by Abel
                    parser_warnings.setdefault('output',{
                        '{}_instruction'.format(instr),
                        'Failed to execute. Errors: {}'.format(e)
                    })
        # add all parser warnings to the error list
        parser_warnings = ParameterData(dict=parser_warnings)
        errors_node_list.append((
            'parser_warnings', parser_warnings
        ))
        # === save the outputs ===
        new_nodes_list = []
        # save the errors/warrnings
        for item in errors_node_list:
            new_nodes_list.append(item)
        # save vasp data
        if instr_node_list:
            for item in instr_node_list:
                new_nodes_list.append(item)
        return successful, new_nodes_list
| abelcarreras/aiida_extensions | plugins/parsers/vasp/__init__.py | Python | mit | 8,544 | [
"VASP"
] | a13518304ea6c2a6ec9f480825e848325647b2bd3e8d4fdfdd3339b1d475b3c4 |
# generics/DogsAndRobots.py
# (c)2017 MindView LLC: see Copyright.txt
# We make no guarantees that this code is fit for any purpose.
# Visit http://OnJava8.com for more book information.
class Dog:
    """A pet that knows a couple of tricks (structural-typing demo)."""

    def speak(self):
        """Bark to the console."""
        print("Arf!")

    def sit(self):
        """Report sitting down."""
        print("Sitting")

    def reproduce(self):
        """Placeholder: dogs reproduce, robots get oil changes."""
        pass
class Robot:
    """A machine with the same speak()/sit() interface as Dog."""

    def speak(self):
        """Emit a click to the console."""
        print("Click!")

    def sit(self):
        """Report powering down into a seated position."""
        print("Clank!")

    def oilChange(self):
        """Placeholder: robots get oil changes, dogs reproduce."""
        pass
def perform(anything):
    """Make any speak()/sit() implementer perform.

    Duck typing: no common base class is required, only the two methods.
    """
    anything.speak()
    anything.sit()
a = Dog()
b = Robot()
# Both objects satisfy the implicit speak()/sit() interface, so perform()
# accepts either without a shared base class.
for creature in (a, b):
    perform(creature)
# Expected console output (checked by the book's build tooling):
output = """
Arf!
Sitting
Click!
Clank!
"""
| mayonghui2112/helloWorld | sourceCode/testMaven/onjava8/src/main/java/generics/DogsAndRobots.py | Python | apache-2.0 | 618 | [
"VisIt"
] | b00824e8b2205a3a7ea19d94fab4b04c7383d6daa2d6b49ce515feb9c4bdc8a1 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import itertools
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from scipy.ndimage.interpolation import shift
import abel
from abel.tools.center import find_origin, center_image, set_center
def test_find_origin():
    """
    Test find_origin methods.
    """
    size = [12, 13]
    row, col = 5.4, 6.6  # origin
    w = 3.0  # gaussian width parameter (sqrt(2) * sigma)
    for rows in size:
        y2 = ((np.arange(rows) - row) / w)**2
        for cols in size:
            x2 = ((np.arange(cols) - col) / w)**2
            # 2-D Gaussian test image centered at (row, col)
            data = np.exp(-(x2 + y2[:, None]))
            axes = (1, 0)
            # (not testing trivial 'image_center', which does not find origin)
            for method in ['com', 'convolution', 'gaussian', 'slice']:
                origin = find_origin(data, method, axes)
                # expected origin: the true center along requested axes,
                # the geometric image center along the others
                ref = (row if 0 in axes else rows // 2,
                       col if 1 in axes else cols // 2)
                tol = 0.2  # 'convolution' rounds to 0.5 pixels
                assert_allclose(origin, ref, atol=tol, verbose=False,
                                err_msg='-> {} x {}, method = {}, axes = {}: '
                                        'origin = {} not equal {}'.
                                        format(rows, cols, method, axes,
                                               origin, ref))
def test_set_center_int():
    """
    Test whole-pixel shifts.
    """
    # input sizes
    size = [4, 5]
    # input size, crop, origin -> output elements
    # (each expected output row/column is encoded as a digit string:
    #  '0' marks zero padding, other digits are 1-based input positions)
    param = {4: {'maintain_size': [[None, '1234'],
                                   [0, '0012'],
                                   [1, '0123'],
                                   [2, '1234'],
                                   [3, '2340']],
                 'valid_region': [[None, '1234'],
                                  [0, '1'],
                                  [1, '123'],
                                  [2, '234'],
                                  [3, '4']],
                 'maintain_data': [[None, '1234'],
                                   [0, '0001234'],
                                   [1, '01234'],
                                   [2, '12340'],
                                   [3, '1234000']]},
             5: {'maintain_size': [[None, '12345'],
                                   [0, '00123'],
                                   [1, '01234'],
                                   [2, '12345'],
                                   [3, '23450'],
                                   [4, '34500']],
                 'valid_region': [[None, '12345'],
                                  [0, '1'],
                                  [1, '123'],
                                  [2, '12345'],
                                  [3, '345'],
                                  [4, '5']],
                 'maintain_data': [[None, '12345'],
                                   [0, '000012345'],
                                   [1, '0012345'],
                                   [2, '12345'],
                                   [3, '1234500'],
                                   [4, '123450000']]}}
    # all size combinations
    for rows, cols in itertools.product(size, repeat=2):
        # test data: consecutive numbers from 1, row by row
        data = (np.arange(rows * cols) + 1).reshape((rows, cols))
        # all crop options
        for crop in ['maintain_size', 'valid_region', 'maintain_data']:
            # all origin rows
            for row, rref in param[rows][crop]:
                # vector of reference rows
                rref = np.array([int(n) for n in rref])
                # all origin columns
                for col, cref in param[cols][crop]:
                    # vector of reference columns
                    cref = np.array([int(n) for n in cref])
                    # reference array, rebuilt from the row/column encodings
                    ref = (rref[:, None] - 1) * cols + cref
                    ref[rref == 0] = 0
                    ref[:, cref == 0] = 0
                    # check set_center() result
                    result = set_center(data, (row, col), crop=crop)
                    assert_equal(result, ref, verbose=False,
                                 err_msg='-> {} x {}, origin = {}, crop = {}\n'
                                         'result =\n{}\n'
                                         'must be =\n{}'.
                                         format(rows, cols, (row, col), crop,
                                                result, ref))
def test_set_center_float():
    """
    Test fractional shifts.
    """
    # input sizes
    size = [10, 11]
    # default origin coordinate (substituting None)
    default = 5.0
    # input size, origin, crop -> output size, non-zero range
    param = {10: [(None, {'maintain_size': [10, (0, 10)],
                          'valid_region': [10, (0, 10)],
                          'maintain_data': [10, (0, 10)]}),
                  (2.5, {'maintain_size': [10, (2, 10)],
                         'valid_region': [5, (0, 5)],
                         'maintain_data': [15, (4, 15)]}),
                  (3.5, {'maintain_size': [10, (1, 10)],
                         'valid_region': [7, (0, 7)],
                         'maintain_data': [13, (2, 13)]}),
                  (4.5, {'maintain_size': [10, (0, 10)],
                         'valid_region': [9, (0, 9)],
                         'maintain_data': [11, (0, 11)]}),
                  (5.5, {'maintain_size': [10, (0, 10)],
                         'valid_region': [7, (0, 7)],
                         'maintain_data': [13, (0, 11)]}),
                  (6.5, {'maintain_size': [10, (0, 9)],
                         'valid_region': [5, (0, 5)],
                         'maintain_data': [15, (0, 11)]})],
             11: [(None, {'maintain_size': [11, (0, 11)],
                          'valid_region': [11, (0, 11)],
                          'maintain_data': [11, (0, 11)]}),
                  (3.5, {'maintain_size': [11, (1, 11)],
                         'valid_region': [7, (0, 7)],
                         'maintain_data': [15, (3, 15)]}),
                  (4.5, {'maintain_size': [11, (0, 11)],
                         'valid_region': [9, (0, 9)],
                         'maintain_data': [13, (1, 13)]}),
                  (5.5, {'maintain_size': [11, (0, 11)],
                         'valid_region': [9, (0, 9)],
                         'maintain_data': [13, (0, 12)]}),
                  (6.5, {'maintain_size': [11, (0, 10)],
                         'valid_region': [7, (0, 7)],
                         'maintain_data': [15, (0, 12)]})]}
    w = 2.0  # gaussian width parameter (sqrt(2) * sigma)
    # all size combinations
    for rows, cols in itertools.product(size, repeat=2):
        # all origin "rows"
        for row, rparam in param[rows]:
            y2 = ((np.arange(rows) - (row or default)) / w)**2
            # all origin "columns"
            for col, cparam in param[cols]:
                x2 = ((np.arange(cols) - (col or default)) / w)**2
                # test data: gaussian centered at (row, col)
                data = np.exp(-(x2 + y2[:, None]))
                # all crop options
                for crop in ['maintain_size', 'valid_region', 'maintain_data']:
                    # check set_center() result
                    result = set_center(data, (row, col), crop=crop)
                    refrows, rrange = rparam[crop]
                    refcols, crange = cparam[crop]
                    refshape = (refrows, refcols)
                    refrange = (slice(*rrange), slice(*crange))
                    # a None component keeps the original (default) origin
                    reforigin = (refrows // 2 if row else default,
                                 refcols // 2 if col else default)
                    msg = '-> {} x {}, origin = {}, crop = {}: '.\
                        format(rows, cols, (row, col), crop)
                    # shape
                    assert_equal(result.shape, refshape, verbose=False,
                                 err_msg=msg + 'shape {} not equal {}'.
                                 format(result.shape, refshape))
                    # non-zero data
                    assert_equal(result[refrange] != 0, True,
                                 err_msg=msg + 'zeros in non-zero range')
                    # zero padding
                    tmp = result.copy()
                    tmp[refrange] = 0
                    assert_equal(tmp, 0, err_msg=msg +
                                 'non-zeros outside non-zero range')
                    # gaussian center must land on the expected new origin
                    origin = find_origin(result, 'gaussian')
                    assert_allclose(origin, reforigin, atol=0.01,
                                    verbose=False, err_msg=msg +
                                    'shifted center {} not equal {}'.
                                    format(origin, reforigin))
def test_set_center_axes():
    """
    Check `None` origin components and the `axes` argument of set_center().
    """
    for N in [4, 5]:
        data = np.arange(N**2).reshape((N, N))
        c = N // 2
        msg = '-> N = {}, '.format(N)
        # A fully unspecified origin, or an empty axes list, is a no-op.
        assert_equal(set_center(data, (None, None)), data,
                     err_msg=msg + '(None, None)')
        assert_equal(set_center(data, (0, 0), axes=[]), data,
                     err_msg=msg + '(0, 0), axes=[]')
        # A None component behaves like the image center along that axis.
        assert_equal(set_center(data, (0, None)), set_center(data, (0, c)),
                     err_msg=msg + '(0, None)')
        assert_equal(set_center(data, (None, 0)), set_center(data, (c, 0)),
                     err_msg=msg + '(None, 0)')
        # Restricting axes ignores the origin component of the other axis.
        assert_equal(set_center(data, (0, 0), axes=0), set_center(data, (0, c)),
                     err_msg=msg + '(0, 0), axes=0')
        assert_equal(set_center(data, (0, 0), axes=1), set_center(data, (c, 0)),
                     err_msg=msg + '(0, 0), axes=1')
def test_set_center_order():
    """
    Test rounding for order = 0 and exact output for order = 1.
    """
    # FIX: was a redundant double assignment ("data = data = np.ones(...)").
    data = np.ones((5, 5))
    origin = np.array([1.9, 2.2])
    # check origin rounding for order = 0
    assert_equal(set_center(data, origin, order=0),
                 set_center(data, origin.round()),
                 err_msg='-> order = 0 not equal round(origin)')
    # check output for order = 1:
    # maintain_size
    result = set_center(data, origin, 'maintain_size', order=1)
    ref = np.outer([0.9, 1, 1, 1, 1],
                   [1, 1, 1, 1, 0.8])
    assert_allclose(result, ref,
                    err_msg='-> crop = maintain_size, order = 1')
    # valid_region
    result = set_center(data, origin, 'valid_region', order=1)
    ref = np.ones((3, 3))
    assert_allclose(result, ref,
                    err_msg='-> crop = valid_region, order = 1')
    # maintain_data
    result = set_center(data, origin, 'maintain_data', order=1)
    ref = np.outer([0, 0.9, 1, 1, 1, 1, 0.1],
                   [0.2, 1, 1, 1, 1, 0.8, 0])
    assert_allclose(result, ref,
                    err_msg='-> crop = maintain_data, order = 1')
def test_center_image():
    """
    Test find_origin ('slice' and 'com' methods), single-axis centering
    with center_image, and the odd_size option on an even-width image.
    """
    # BASEX sample image, Gaussians at 10, 15, 20, 70,85, 100, 145, 150, 155
    # image width, height n = 361, origin = (180, 180)
    IM = abel.tools.analytical.SampleImage(n=361, name="dribinski").image
    # artificially displace origin, now at (179, 182)
    IMx = shift(IM, (-1, 2))
    true_origin = (179, 182)
    # find_origin using 'slice' method
    origin = find_origin(IMx, method="slice")
    assert_allclose(origin, true_origin, atol=1)
    # find_origin using 'com' method
    origin = find_origin(IMx, method="com")
    assert_allclose(origin, true_origin, atol=1)
    # check single axis - vertical
    # center shifted image IMx in the vertical direction only
    IMc = center_image(IMx, method="com", axes=1)
    # determine the origin
    origin = find_origin(IMc, method="com")
    # only the column coordinate should be back at the midpoint (180)
    assert_allclose(origin, (179, 180), atol=1)
    # check single axis - horizontal
    # center shifted image IMx in the horizontal direction only
    IMc = center_image(IMx, method="com", axes=0)
    origin = find_origin(IMc, method="com")
    # only the row coordinate should be back at the midpoint (180)
    assert_allclose(origin, (180, 182), atol=1)
    # check even image size returns odd
    # drop off one column, to make an even column image
    IM = IM[:, :-1]
    m, n = IM.shape
    IMy = center_image(IM, method="slice", odd_size=True)
    # odd_size trims the even axis by one, so width shrinks from n to n-1
    assert_allclose(IMy.shape, (m, n-1))
if __name__ == "__main__":
    # Run the whole test suite directly (without pytest).
    test_find_origin()
    test_set_center_axes()
    test_set_center_int()
    test_set_center_float()
    test_set_center_order()
    test_center_image()
| DanHickstein/pyBASEX | abel/tests/test_tools_center.py | Python | gpl-2.0 | 12,986 | [
"Gaussian"
] | c9b517094e4831efd5aee789b73766f70331f5cb69986b7e06647ac1e89ed001 |
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
# This script generates the solutions to the radially-symmetric cold-CO2 injection scenario
# presented in:
# (1) T LaForce, J Ennis-King, L Paterson ``Semi-analytical solutions for nonisothermal fluid injection including heat loss from the reservoir: Part 1. Saturation and temperature'' Advances in Water Resources 73 (2014) 227--234.
# (2) T LaForce, A Mijic, J Ennis-King, L Paterson ``Semi-analytical solutions for nonisothermal fluid injection including heat loss from the reservoir: Part 2. Pressure and stress'' Advances in Water Resources 73 (2014) 242--253.
#
# The main contolling parameter is "resolution", below.
# It is the number of points at which the output is computed at.
# The functions below are incredibly simple minded and involve integrations
# over the spatial domain, which means that this whole script completes in
# approximately resolution^2 time.
# resolution=10 gives a reasonable approximation, while resolution=800 is very good
#
# The other input parameters, such as fluid densities, etc, are all given below.
import os
import sys
import numpy as np
import scipy.optimize as opt
import scipy.interpolate
import matplotlib.pyplot as plt
import scipy.special
##############################
# #
# Main controlling parameter #
# #
##############################
resolution = 800
#######################
# #
# Physical quantities #
# #
#######################
rho1 = 970.00 # density of water
rho2 = 516.48 # density of CO2
C1 = 4149.0 # specific heat capacity of water
C2 = 2920.5 # specific heat capacity of CO2
phir = 0.2 # porosity of reservoir
phia = 0.02 # porosity of the adjacent formations
rhor = rhos = 2350.0 # density of the reservoir rock grains
rhoa = 2773.4 # density of adjacent formations
Cr = Cs = 1100.0 # specific heat capacity of reservoir rock grains
Ca = 828.9 # specific heat capacity of adjacent rock grains
tti = 358 # initial temperature of reservoir
ttw = 294 # injection temperature
ll = 5000.0 # lateral extent of reservoir
rw = 0.1 # radius of well
q = 15.855 # methane injection rate
h = 11.0 # height of reservoir
ss1r = 0.200 # residual saturation of water
ss2r = 0.205 # residual saturation of CO2
mu1_reservoir = 0.4704 # water viscosity at reservoir temperature
mu2_reservoir = 0.0163 # methane viscosity at reservoir temperature
mu1_intermediate = 0.6733 # water viscosity at intermediate temperature
mu2_intermediate = 0.0164 # methane viscosity at intermediate temperature
mu1_injection = 1.0473 # water viscosity at injection temperature
mu2_injection = 0.0167 # methane viscosity at injection temperature
mu1_moose = 3.394E-4 # viscosity of water used by MOOSE
mu2_moose = 3.93E-5 # viscosity of CO2 used by MOOSE
kka = 4.310 # wet thermal conductivity of the adjacent formation
comp1 = 1.0 / (2.27E14) # compressibility of water
k = 2E-12 # permeability in radial direction
thermal_expansion = 5E-6 # linear thermal expansion coefficient of rock
poisson = 0.2
young = 14.4E9
stress_eff_hor_ini = -12.8E6
# Tara parameters
#tti = 84.8
#ttw = 21
#mu2_moose = 0.0642E-3
#comp1 = 4.4E-10
######################
# #
# Derived quantities #
# #
######################
one_hour = 3600.0
one_day = one_hour * 24
one_month = one_day * 30
five_years = one_day * 365 * 5
alpha = rho2 * C2 / (rho1 * C1 - rho2 * C2)
beta = (rho2 * C2 + (1.0 - phir) * rhos * Cs / phir) / (rho1 * C1 - rho2 * C2)
gammaR = ll * ll * np.pi / (q / rho2) / (rho1 * C1 - rho2 * C2)
ttr = tti - ttw
##########
# #
# Code #
# #
##########
def integrate_over_r(rmin, rmax, fcn, n = 0, bias_to_small_r = True, num_rs = 100):
    """Approximate \\int_{r=rmin}^{r=rmax} dr r^n fcn(r) by the trapezoidal rule.

    fcn is any callable of a single float (e.g. a scipy.interpolate.interp1d
    object).  With bias_to_small_r=True the num_rs sample points are spaced
    logarithmically, clustering them near r = rmin; otherwise the spacing
    is uniform.
    """
    if bias_to_small_r:
        lo = np.log10(rmin)
        hi = np.log10(rmax)
        r_points = [np.power(10, lo + (hi - lo) * i / float(num_rs - 1))
                    for i in range(num_rs)]
    else:
        r_points = [rmin + (rmax - rmin) * i / float(num_rs - 1)
                    for i in range(num_rs)]
    total = 0.0
    # Trapezoid on each consecutive pair of sample points.
    for a, b in zip(r_points, r_points[1:]):
        total += 0.5 * (b - a) * (np.power(b, n) * fcn(b) + np.power(a, n) * fcn(a))
    return total
def xd_given_r(r):
    """Dimensionless position x_D = (r/L)^2."""
    return r * r / ll / ll
def r_given_xd(xd):
    """Inverse of xd_given_r: physical radius from dimensionless position."""
    return np.sqrt(xd) * ll
def td_given_t(t):
    """Dimensionless time t_D from physical time t."""
    return (q / rho2) * t / np.pi / phir / ll / ll / h
def t_given_td(td):
    """Inverse of td_given_t."""
    return td * np.pi * phir * ll * ll * h / (q / rho2)
def uu_given_t(t):
    """Heat-conduction parameter built from the adjacent-formation properties."""
    return np.sqrt(kka * (phia * rho1 * C1 + (1.0 - phia) * rhoa * Ca) / t)
def ttd_given_tt(tt):
    """Dimensionless temperature: 0 at the initial, 1 at the injection temperature."""
    return (tt - tti) / (ttw - tti)
def tt_given_ttd(ttd):
    """Inverse of ttd_given_tt."""
    return ttd * (ttw - tti) + tti
def sshat_given_ss1(ss1):
    """Normalized (effective) water saturation from the actual water saturation."""
    return (ss1 - ss1r) / (1 - ss1r - ss2r)
def dsshat_dss1():
    """Constant derivative d(sshat)/d(ss1)."""
    return 1.0 / (1 - ss1r - ss2r)
def ss1_given_sshat(sshat):
    """Inverse of sshat_given_ss1."""
    return (1 - ss1r - ss2r) * sshat + ss1r
def kr1_hat(sshat):
    """Water relative permeability vs normalized saturation: sshat^4."""
    return np.power(sshat, 4.0)
def dkr1_hat_dsshat(sshat):
    """Derivative of kr1_hat."""
    return 4.0 * np.power(sshat, 3.0)
def kr2_hat(sshat):
    """CO2 relative permeability vs normalized saturation: (1-s)^2 (1-s^2)."""
    return np.power(1 - sshat, 2.0) * (1 - sshat * sshat)
def dkr2_hat_dsshat(sshat):
    """Derivative of kr2_hat (expanded polynomial form)."""
    return -4 * np.power(sshat, 3) + 6 * np.power(sshat, 2) - 2
def kr1(ss1):
    """Water relative permeability vs actual saturation, clamped to [0, 1]."""
    sshat = sshat_given_ss1(ss1)
    if sshat <= 0:
        return 0.0
    elif sshat >= 1:
        return 1.0
    return kr1_hat(sshat)
def dkr1_dss1(ss1):
    """Derivative of kr1 w.r.t. actual saturation (zero outside the mobile range)."""
    sshat = sshat_given_ss1(ss1)
    if sshat <= 0:
        return 0.0
    elif sshat >= 1:
        return 0.0
    return dkr1_hat_dsshat(sshat) * dsshat_dss1()
def kr2(ss1):
    """CO2 relative permeability vs actual water saturation, clamped to [0, 1]."""
    sshat = sshat_given_ss1(ss1)
    if sshat <= 0:
        return 1.0
    elif sshat >= 1:
        return 0.0
    return kr2_hat(sshat)
def dkr2_dss1(ss1):
    """Derivative of kr2 w.r.t. actual saturation (zero outside the mobile range)."""
    sshat = sshat_given_ss1(ss1)
    if sshat <= 0:
        return 0.0
    elif sshat >= 1:
        return 0.0
    return dkr2_hat_dsshat(sshat) * dsshat_dss1()
def f1(ss1, mu1, mu2):
    """Water fractional-flow function f_1(S_1) for viscosities mu1 (water), mu2 (CO2)."""
    sshat = sshat_given_ss1(ss1)
    if sshat <= 0:
        return 0.0
    elif sshat >= 1:
        return 1.0
    return 1.0 / (1.0 + kr2(ss1) * mu1 / kr1(ss1) / mu2)
def df1_dss1(ss1, mu1, mu2):
    """Derivative of f1 w.r.t. water saturation (zero outside the mobile range)."""
    sshat = sshat_given_ss1(ss1)
    if sshat <= 0:
        return 0.0
    elif sshat >= 1:
        return 0.0
    # d(1/x)/dS = -(1/x^2) dx/dS with x = 1 + (mu1/mu2) kr2/kr1, and 1/x = f1
    dby_denom = - np.power(f1(ss1, mu1, mu2), 2)
    return dby_denom * (mu1 / mu2) * (dkr2_dss1(ss1) / kr1(ss1) - kr2(ss1) * dkr1_dss1(ss1) / np.power(kr1(ss1), 2))
def f2(ss1, mu1, mu2):
    """CO2 fractional flow, f_2 = 1 - f_1."""
    return 1.0 - f1(ss1, mu1, mu2)
def df2_dss1(ss1, mu1, mu2):
    """Derivative of f2 w.r.t. water saturation."""
    return - df1_dss1(ss1, mu1, mu2)
def ss1ss(mu1, mu2):
    """Solve for the shock water saturation S_{1S} via scipy's fsolve."""
    # returns S_{1S}, which is the water saturation when
    # df1_dss1(S) = (f1(S) - 1) / (S - 1)
    def f(s):
        return df1_dss1(s, mu1, mu2) - (f1(s, mu1, mu2) - 1) / (s - 1)
    # 0.6 is the initial guess handed to the root finder
    return opt.fsolve(f, 0.6)[0]
def ss1tt(mu1, mu2):
    """Solve for the thermal-front water saturation S_{1T} via scipy's fsolve."""
    # returns S_{1T}, which is the water saturation when
    # df1_dss1(S) = (f1(S) + alpha) / (S + beta)
    def f(s):
        return df1_dss1(s, mu1, mu2) - (f1(s, mu1, mu2) + alpha) / (s + beta)
    # 0.4 is the initial guess handed to the root finder
    return opt.fsolve(f, 0.4)[0]
# Tara's S_{1S} in upper-left Fig2 of paper1
ss1ss_reservoir = ss1ss(mu1_reservoir, mu2_reservoir)
ss1ss_injection = ss1ss(mu1_injection, mu2_injection)
ss1ss_moose = ss1ss(mu1_moose, mu2_moose)
def saturation_shock_position(t, mu1, mu2):
    """Radius of the saturation shock front at physical time t."""
    # The shock moves at the characteristic speed df1/dS evaluated at S_{1S}.
    shock_speed = df1_dss1(ss1ss(mu1, mu2), mu1, mu2)
    return r_given_xd(shock_speed * td_given_t(t))
def temperature_shock_position(t, mu1, mu2):
    """Radius of the temperature shock front at physical time t."""
    # The thermal front moves at the characteristic speed df1/dS at S_{1T}.
    front_speed = df1_dss1(ss1tt(mu1, mu2), mu1, mu2)
    return r_given_xd(front_speed * td_given_t(t))
def co2_saturation(t, mu1, mu2, num_saturations = 1000):
    """Return (radii, co2_saturations): the CO2 saturation profile at time t."""
    # co2 saturation as a function of radial position: each water saturation
    # value in [0, S_{1S}] is mapped to the radius it has reached, moving at
    # its characteristic speed df1/dS for a dimensionless time t_D.
    local_ss1ss = ss1ss(mu1, mu2)
    water_saturations = [local_ss1ss * i / float(num_saturations - 1) for i in range(num_saturations)]
    velocities = [df1_dss1(s, mu1, mu2) for s in water_saturations]
    td = td_given_t(t)
    xds = [v * td for v in velocities]
    r = [r_given_xd(xd) for xd in xds]
    # step down to zero CO2 just past the shock, and out to the reservoir edge
    r += [r[-1] + 1E-9, ll]
    co2_saturations = [1 - s for s in water_saturations] + [0, 0]
    return (r, co2_saturations)
def tt_result(t, mu1, mu2, num_rs = 10):
    """Return (radii, temperatures): the temperature profile at time t."""
    # temperature distribution as a function of radial position
    shock_pos = temperature_shock_position(t, mu1, mu2)
    rs = [shock_pos * i / float(num_rs - 1) for i in range(num_rs)]
    xds = [xd_given_r(r) for r in rs]
    # calculate the water saturation
    co2_s = co2_saturation(t, mu1, mu2, num_rs)
    xds_where_s_defined = [xd_given_r(r_val) for r_val in co2_s[0]]
    s_water = [(1 - s_val) for s_val in co2_s[1]]
    # drop repeated points at x_D = 0, then pin the profile to the residual
    # water saturation at the origin so interp1d gets a clean abscissa
    while (xds_where_s_defined[0] == 0.0):
        xds_where_s_defined.pop(0)
        s_water.pop(0)
    xds_where_s_defined.insert(0, 0.0)
    s_water.insert(0, ss1r)
    s_water_interp = scipy.interpolate.interp1d(xds_where_s_defined, s_water)
    # calculate integral of Eqn(20) of Tara's first paper, for each xd in xds
    integral = []
    for xd in xds:
        finite_difference_points = [xd * i / float(num_rs - 1) for i in range(num_rs)]
        # NOTE(review): the comprehension below iterates "for xd in xds" (all the
        # output positions), not over finite_difference_points, while the spacing
        # factor on the next line comes from finite_difference_points -- confirm
        # this mixing of sample points and spacing is intended.
        integrand = [-gammaR * uu_given_t(t) / (f1(s_water_interp(xd), mu1, mu2) + alpha) for xd in xds]
        integral.append(sum(integrand) * (finite_difference_points[1] - finite_difference_points[0])) # edges of integral probably don't matter if num_rs is large enough
    ttd_vals = np.exp(integral)
    tt_vals = [tt_given_ttd(ttd) for ttd in ttd_vals]
    # continue at the initial temperature from just past the front to the edge
    rs += [rs[-1] * (1 + 1E-9), ll]
    tt_vals += [tti, tti]
    return (rs, tt_vals)
def ggp(t, mu1, mu2, num_rs = 10, rmin = rw):
    """Return (radii, Gp values) for radii between rmin and the saturation shock."""
    # Gp(r) of Eqn(14) in Tara's second paper (i've written it as a fcn of (t, r) in contrast to Tara's (td, rd))
    shock_pos = saturation_shock_position(t, mu1, mu2)
    # rs and xds are the positions to provide the answer (Gp) at
    # (logarithmically spaced between rmin and the shock position)
    rs = [np.log10(rmin) + (np.log10(shock_pos)- np.log10(rmin)) * i / float(num_rs - 1) for i in range(num_rs)]
    rs = [np.power(10, r) for r in rs]
    # calculate the water saturation
    co2_s = co2_saturation(t, mu1, mu2, num_rs)
    r_vals_where_s_defined = co2_s[0]
    s_water = [(1 - s_val) for s_val in co2_s[1]]
    # drop repeated points at r = 0, then pin the profile to the residual
    # water saturation at the origin so interp1d gets a clean abscissa
    while r_vals_where_s_defined[0] == 0.0:
        r_vals_where_s_defined.pop(0)
        s_water.pop(0)
    r_vals_where_s_defined.insert(0, 0.0)
    s_water.insert(0, ss1r)
    s_water_interp = scipy.interpolate.interp1d(r_vals_where_s_defined, s_water)
    # calculate integral of Eqn(14) of Tara's first paper, for each xd in xds
    # (trapezoidal rule over log-spaced points from r out to the shock)
    integral = []
    for r in rs:
        r_points = [np.log10(r) + (np.log10(rs[-1]) - np.log10(r)) * i / float(num_rs - 1) for i in range(num_rs)]
        r_points = [np.power(10, x) for x in r_points]
        this_integral = 0.0
        for i in range(len(r_points) - 1):
            a = r_points[i]
            b = r_points[i + 1]
            # the 2.0 in the following line comes from dr_D / r_D = 2.0 dr / r
            this_integral += 0.5 * (b - a) * (2.0 / b / (kr1(s_water_interp(b)) / mu1 + kr2(s_water_interp(b)) / mu2) + 2.0 / a / (kr1(s_water_interp(a)) / mu1 + kr2(s_water_interp(a)) / mu2))
        integral.append(this_integral)
    return (rs, integral)
def ggb(t, r, mu1):
    # Gb of Eqn(15) in Tara's second paper (i've written it as a fcn of (t, r) in contrast to Tara's (td, rd))
    return - scipy.special.expi(- (q / rho2) * mu1 * comp1 * xd_given_r(r) / 4 / np.pi / h / k / td_given_t(t))
def ggll(t, r, mu1):
    # G_L of Eqn(16) in Tara's second paper (i've written it as a fcn of (t, r) in contrast to Tara's (td, rd))
    # NOTE(review): the r argument is unused -- the expression is evaluated at
    # the reservoir edge via xd_given_r(ll); presumably the parameter is kept
    # only for signature symmetry with ggb -- confirm.
    return - scipy.special.expi(- (q / rho2) * mu1 * comp1 * xd_given_r(ll) / 4 / np.pi / h / k / td_given_t(t))
def pp_increase(t, mu1, mu2, num_rs = 10, rmin = rw):
    """Return (radii, porepressure increases in Pa) at physical time t.

    Inside the saturation shock the two-phase term Gp applies together with
    the boundary terms Gb and G_L; outside the shock only the single-phase
    (water) terms remain.  Cf. Eqns(14)-(16) of Tara's second paper.
    """
    shock_pos = saturation_shock_position(t, mu1, mu2)
    # log-spaced output radii from rmin out to the reservoir edge
    rs = [np.log10(rmin) + (np.log10(ll)- np.log10(rmin)) * i / float(num_rs - 1) for i in range(num_rs)]
    rs = [np.power(10, r) for r in rs]
    # Gp is expensive: evaluate it once and interpolate below
    precompute_ggp = ggp(t, mu1, mu2, num_rs)
    ggp_interp = scipy.interpolate.interp1d(precompute_ggp[0], precompute_ggp[1])
    pp_inc = []
    for r in rs:
        if r <= shock_pos:
            pp_inc.append(ggp_interp(r) + mu1 * ggb(t, shock_pos, mu1) - mu1 * ggll(t, r, mu1))
        else:
            pp_inc.append(mu1 * ggb(t, r, mu1) - mu1 * ggll(t, r, mu1))
    # common prefactor Q / (4 pi h k) converts the G's to a pressure
    return rs, [(q / rho2) / 4 / np.pi / h / k * delp for delp in pp_inc]
def upr(t, temp_soln, pp_inc, num_rs = 10, rmin = rw):
    """Return (radii, u_pr values): the particular displacement solution.

    temp_soln and pp_inc are (radii, values) tuples as returned by
    tt_result and pp_increase respectively.
    """
    # u_pr in Eqn(33) of Tara's second paper
    # log-spaced output radii from rmin out to the reservoir edge
    rs = [np.log10(rmin) + (np.log10(ll)- np.log10(rmin)) * i / float(num_rs - 1) for i in range(num_rs)]
    rs = [np.power(10, r) for r in rs]
    # first do the integral of g = T_{D2} (Eqn(34))
    temp_interp = scipy.interpolate.interp1d(temp_soln[0], temp_soln[1])
    g1 = [0.0]
    for r in rs[1:]:
        # trapezoidal rule over log-spaced points from rmin up to r
        r_points = [np.log10(rmin) + (np.log10(r) - np.log10(rmin)) * i / float(num_rs - 1) for i in range(num_rs)]
        r_points = [np.power(10, x) for x in r_points]
        this_integral = 0.0
        for i in range(len(r_points) - 1):
            a = r_points[i]
            b = r_points[i + 1]
            # the (1.0 / rw^2) in the following line comes from eta deta = r dr / rw^2
            # T_{D2} = -T_{D1} = (T - T_{i})/ (T_{w} - T_{i})
            # NOTE: check this negative sign
            contrib = - 0.5 * (b - a) * (b * ttd_given_tt(temp_interp(b)) + a * ttd_given_tt(temp_interp(a)))
            this_integral += contrib * (1.0 / rw / rw)
        g1.append(this_integral)
    # second do the integral of g = (1 - 2v)/(1 - v) P_{D}
    pp_interp = scipy.interpolate.interp1d(pp_inc[0], pp_inc[1])
    g2 = [0.0]
    for r in rs[1:]:
        r_points = [np.log10(rmin) + (np.log10(r) - np.log10(rmin)) * i / float(num_rs - 1) for i in range(num_rs)]
        r_points = [np.power(10, x) for x in r_points]
        this_integral = 0.0
        for i in range(len(r_points) - 1):
            a = r_points[i]
            b = r_points[i + 1]
            # the (1.0 / rw^2) in the following line comes from eta deta = r dr / rw^2
            contrib = 0.5 * (b - a) * (b * pp_interp(b) + a * pp_interp(a))
            this_integral += contrib * (1.0 / rw / rw) * (1 - 2 * poisson) / thermal_expansion / ttr / young
        g2.append(this_integral)
    # combine the two contributions and scale by rw / r
    g = [(g1[i] + g2[i]) * rw / rs[i] for i in range(len(rs))]
    return rs, g
def aa_bb(upr_soln, pp_inc):
    """Return the integration constants (A, B) of the homogeneous displacement."""
    # Computes A and B, or Eqn(A2) of Tara's second paper
    # ASSUME that pp_inc[1][0] is the porepressure increase at the well
    phatzero = (pp_inc[1][0] + stress_eff_hor_ini)* (1 - poisson) / thermal_expansion / ttr / young # Probably tara had -stress_eff_hor_ini instead as that gives similar displacements, but incorrect effective stresses near the origin
    # ASSUME that upr_soln[1][-1] = u_{pr}(xi=xi1), ie, that upr has been evaluated up to xi=xi1 (which is r=L)
    bb = ((1 - 2 * poisson) * phatzero - upr_soln[1][-1] * rw / ll) / (1 - 2 * poisson + rw * rw / ll / ll) # Eqn (A2) of Tara's second paper (possibly with fixed sign) with fixed denominator (that was wrong in Rehbinder too)
    aa = -bb * rw * rw / ll / ll - upr_soln[1][-1] * rw / ll # Eqn (A2) of Tara's second paper with fixed sign on first term
    return (aa, bb)
def uh(upr_soln, pp_inc, num_rs = 10, rmin = rw):
    """Homogeneous displacement term u_h of Eqn(32) in Tara's second paper."""
    aa, bb = aa_bb(upr_soln, pp_inc)
    # log-spaced output radii from rmin out to the reservoir edge
    log_lo = np.log10(rmin)
    log_hi = np.log10(ll)
    rs = [np.power(10, log_lo + (log_hi - log_lo) * i / float(num_rs - 1))
          for i in range(num_rs)]
    result = [aa * r / rw + bb * rw / r for r in rs]
    return (rs, result)
def eff_rr(upr_soln, pp_inc, num_rs = 10, rmin = rw):
    """Return (radii, effective radial stresses in MPa)."""
    # effective stress in rr direction: Eqn(A3) of Tara's second paper
    aa, bb = aa_bb(upr_soln, pp_inc)
    # both inputs must be sampled at identical radii for the pointwise formula
    if pp_inc[0] != upr_soln[0]:
        sys.stderr.write("pp_inc defined at different points to upr_soln\n")
        sys.exit(1)
    result = [stress_eff_hor_ini + (thermal_expansion * ttr * young / (1 - poisson)) * (aa / (1 - 2 * poisson) - bb * rw * rw / pp_inc[0][i] / pp_inc[0][i] + (1 - poisson) * pp_inc[1][i] / thermal_expansion / ttr / young - upr_soln[1][i] * rw / pp_inc[0][i]) for i in range(len(pp_inc[0]))]
    # convert Pa -> MPa for output
    return pp_inc[0], [r / 1E6 for r in result]
def eff_tt(upr_soln, pp_inc, tt_soln, num_rs = 10, rmin = rw):
    """Return (radii, effective hoop stresses in MPa)."""
    # effective stress in tt direction: Eqn(A3) of Tara's second paper
    aa, bb = aa_bb(upr_soln, pp_inc)
    # both inputs must be sampled at identical radii for the pointwise formula
    if pp_inc[0] != upr_soln[0]:
        sys.stderr.write("pp_inc defined at different points to upr_soln\n")
        sys.exit(1)
    # the hoop stress additionally needs the dimensionless temperature at each radius
    tt_interp = scipy.interpolate.interp1d(tt_soln[0], tt_soln[1])
    result = [stress_eff_hor_ini + (thermal_expansion * ttr * young / (1 - poisson)) * (aa / (1 - 2 * poisson) + bb * rw * rw / pp_inc[0][i] / pp_inc[0][i] + pp_inc[1][i] * poisson / thermal_expansion / ttr / young + upr_soln[1][i] * rw / pp_inc[0][i] + ttd_given_tt(tt_interp(pp_inc[0][i]))) for i in range(len(pp_inc[0]))]
    # convert Pa -> MPa for output
    return pp_inc[0], [r / 1E6 for r in result]
def save_to_file(file_name, data):
    """Write *data* to *file_name* as comma-separated values.

    data is a tuple/list of equal-length columns (e.g. (radii, values));
    each output line holds one row, i.e. the i-th entry of every column.
    """
    # "with" guarantees the file is closed even if a write fails
    # (the original left the handle open on exceptions).
    with open(file_name, 'w') as f:
        for row in zip(*data):
            f.write(",".join(map(str, row)) + "\n")
######################################
#                                    #
# Generate results and save to files #
#                                    #
######################################
# Porepressure increase vs radius at four times.
plt.figure()
pp_increase_one_hour = pp_increase(one_hour, mu1_moose, mu2_moose, num_rs=resolution)
pp_increase_one_day = pp_increase(one_day, mu1_moose, mu2_moose, num_rs=resolution)
pp_increase_one_month = pp_increase(one_month, mu1_moose, mu2_moose, num_rs=resolution)
pp_increase_five_years = pp_increase(five_years, mu1_moose, mu2_moose, num_rs=resolution)
plt.semilogx(pp_increase_one_hour[0], [p / 1E6 for p in pp_increase_one_hour[1]], 'b-', label = '1 hour')
plt.semilogx(pp_increase_one_day[0], [p / 1E6 for p in pp_increase_one_day[1]], 'r-', label = '1 day')
plt.semilogx(pp_increase_one_month[0], [p / 1E6 for p in pp_increase_one_month[1]], 'g-', label = '1 month')
plt.semilogx(pp_increase_five_years[0], [p / 1E6 for p in pp_increase_five_years[1]], 'k-', label = '5 years')
plt.legend(loc = 'best')
plt.xlim([0.1, ll])
plt.xlabel("r (m)")
plt.ylabel("Porepressure increase (MPa)")
plt.title("Porepressure")
#plt.savefig("pp.pdf")
save_to_file("pp_one_hour.csv", pp_increase_one_hour)
save_to_file("pp_one_day.csv", pp_increase_one_day)
save_to_file("pp_one_month.csv", pp_increase_one_month)
save_to_file("pp_five_years.csv", pp_increase_five_years)
# CO2 saturation profiles at the same four times.
plt.figure()
sg_one_hour = co2_saturation(one_hour, mu1_moose, mu2_moose, num_saturations=resolution)
sg_one_day = co2_saturation(one_day, mu1_moose, mu2_moose, num_saturations=resolution)
sg_one_month = co2_saturation(one_month, mu1_moose, mu2_moose, num_saturations=resolution)
sg_five_years = co2_saturation(five_years, mu1_moose, mu2_moose, num_saturations=resolution)
plt.semilogx(sg_one_hour[0], sg_one_hour[1], 'b-', label = '1 hour')
plt.semilogx(sg_one_day[0], sg_one_day[1], 'r-', label = '1 day')
plt.semilogx(sg_one_month[0], sg_one_month[1], 'g-', label = '1 month')
plt.semilogx(sg_five_years[0], sg_five_years[1], 'k-', label = '5 years')
plt.legend(loc = 'best')
plt.xlim([0.1, ll])
plt.xlabel("r (m)")
plt.ylabel("Saturation")
plt.title("CO2 saturation")
#plt.savefig("sg.pdf")
save_to_file("sg_one_hour.csv", sg_one_hour)
save_to_file("sg_one_day.csv", sg_one_day)
save_to_file("sg_one_month.csv", sg_one_month)
save_to_file("sg_five_years.csv", sg_five_years)
# Temperature profiles at the same four times.
plt.figure()
tt_one_hour = tt_result(one_hour, mu1_moose, mu2_moose, num_rs=resolution)
tt_one_day = tt_result(one_day, mu1_moose, mu2_moose, num_rs=resolution)
tt_one_month = tt_result(one_month, mu1_moose, mu2_moose, num_rs=resolution)
tt_five_years = tt_result(five_years, mu1_moose, mu2_moose, num_rs=resolution)
plt.semilogx(tt_one_hour[0], tt_one_hour[1], 'b-', label = '1 hour')
plt.semilogx(tt_one_day[0], tt_one_day[1], 'r-', label = '1 day')
plt.semilogx(tt_one_month[0], tt_one_month[1], 'g-', label = '1 month')
plt.semilogx(tt_five_years[0], tt_five_years[1], 'k-', label = '5 years')
plt.legend(loc = 'best')
plt.xlim([0.1, ll])
plt.xlabel("r (m)")
plt.ylabel("Temperature (K)")
plt.title("Temperature")
#plt.savefig("temp.pdf")
save_to_file("tt_one_hour.csv", tt_one_hour)
save_to_file("tt_one_day.csv", tt_one_day)
save_to_file("tt_one_month.csv", tt_one_month)
save_to_file("tt_five_years.csv", tt_five_years)
# Radial displacement: total u = (u_pr + u_h) scaled to millimetres.
plt.figure()
upr_one_hour = upr(one_hour, tt_one_hour, pp_increase_one_hour, num_rs=resolution, rmin = rw)
uh_one_hour = uh(upr_one_hour, pp_increase_one_hour, num_rs=resolution, rmin=rw)
uhat_one_hour = [upr_one_hour[1][i] + uh_one_hour[1][i] for i in range(len(upr_one_hour[1]))]
u_one_hour = (uh_one_hour[0], [1E3 * thermal_expansion * ttr * rw * (1 + poisson) / (1 - poisson) * uhat for uhat in uhat_one_hour])
upr_one_day = upr(one_day, tt_one_day, pp_increase_one_day, num_rs=resolution, rmin = rw)
uh_one_day = uh(upr_one_day, pp_increase_one_day, num_rs=resolution, rmin=rw)
uhat_one_day = [upr_one_day[1][i] + uh_one_day[1][i] for i in range(len(upr_one_day[1]))]
u_one_day = (uh_one_day[0], [1E3 * thermal_expansion * ttr * rw * (1 + poisson) / (1 - poisson) * uhat for uhat in uhat_one_day])
upr_one_month = upr(one_month, tt_one_month, pp_increase_one_month, num_rs=resolution, rmin = rw)
uh_one_month = uh(upr_one_month, pp_increase_one_month, num_rs=resolution, rmin=rw)
uhat_one_month = [upr_one_month[1][i] + uh_one_month[1][i] for i in range(len(upr_one_month[1]))]
u_one_month = (uh_one_month[0], [1E3 * thermal_expansion * ttr * rw * (1 + poisson) / (1 - poisson) * uhat for uhat in uhat_one_month])
upr_five_years = upr(five_years, tt_five_years, pp_increase_five_years, num_rs=resolution, rmin = rw)
uh_five_years = uh(upr_five_years, pp_increase_five_years, num_rs=resolution, rmin=rw)
uhat_five_years = [upr_five_years[1][i] + uh_five_years[1][i] for i in range(len(upr_five_years[1]))]
u_five_years = (uh_five_years[0], [1E3 * thermal_expansion * ttr * rw * (1 + poisson) / (1 - poisson) * uhat for uhat in uhat_five_years])
plt.semilogx(u_one_hour[0], u_one_hour[1], 'b-', label = '1 hour')
plt.semilogx(u_one_day[0], u_one_day[1], 'r-', label = '1 day')
plt.semilogx(u_one_month[0], u_one_month[1], 'g-', label = '1 month')
plt.semilogx(u_five_years[0], u_five_years[1], 'k-', label = '5 years')
plt.legend(loc = 'best')
plt.xlim([0.1, ll])
plt.xlabel("r (m)")
plt.ylabel("Displacement (mm)")
plt.title("Radial displacement")
#plt.savefig("u.pdf")
save_to_file("u_one_hour.csv", u_one_hour)
save_to_file("u_one_day.csv", u_one_day)
save_to_file("u_one_month.csv", u_one_month)
save_to_file("u_five_years.csv", u_five_years)
# Effective radial and hoop stresses (MPa) at the same four times.
seff_rr_one_hour = eff_rr(upr_one_hour, pp_increase_one_hour, num_rs=resolution, rmin=rw)
seff_tt_one_hour = eff_tt(upr_one_hour, pp_increase_one_hour, tt_one_hour, num_rs=resolution, rmin=rw)
seff_rr_one_day = eff_rr(upr_one_day, pp_increase_one_day, num_rs=resolution, rmin=rw)
seff_tt_one_day = eff_tt(upr_one_day, pp_increase_one_day, tt_one_day, num_rs=resolution, rmin=rw)
seff_rr_one_month = eff_rr(upr_one_month, pp_increase_one_month, num_rs=resolution, rmin=rw)
seff_tt_one_month = eff_tt(upr_one_month, pp_increase_one_month, tt_one_month, num_rs=resolution, rmin=rw)
seff_rr_five_years = eff_rr(upr_five_years, pp_increase_five_years, num_rs=resolution, rmin=rw)
seff_tt_five_years = eff_tt(upr_five_years, pp_increase_five_years, tt_five_years, num_rs=resolution, rmin=rw)
plt.figure()
plt.semilogx(seff_rr_one_hour[0], seff_rr_one_hour[1], 'b-', label = '1 hour')
plt.semilogx(seff_rr_one_day[0], seff_rr_one_day[1], 'r-', label = '1 day')
plt.semilogx(seff_rr_one_month[0], seff_rr_one_month[1], 'g-', label = '1 month')
plt.semilogx(seff_rr_five_years[0], seff_rr_five_years[1], 'k-', label = '5 years')
plt.legend(loc = 'best')
plt.xlim([0.1, ll])
plt.xlabel("r (m)")
plt.ylabel("Stress (MPa)")
plt.title("Effective radial stress")
#plt.savefig("seff_rr.pdf")
save_to_file("seff_rr_one_hour.csv", seff_rr_one_hour)
save_to_file("seff_rr_one_day.csv", seff_rr_one_day)
save_to_file("seff_rr_one_month.csv", seff_rr_one_month)
save_to_file("seff_rr_five_years.csv", seff_rr_five_years)
plt.figure()
plt.semilogx(seff_tt_one_hour[0], seff_tt_one_hour[1], 'b-', label = '1 hour')
plt.semilogx(seff_tt_one_day[0], seff_tt_one_day[1], 'r-', label = '1 day')
plt.semilogx(seff_tt_one_month[0], seff_tt_one_month[1], 'g-', label = '1 month')
plt.semilogx(seff_tt_five_years[0], seff_tt_five_years[1], 'k-', label = '5 years')
plt.legend(loc = 'best')
plt.xlim([0.1, ll])
plt.xlabel("r (m)")
plt.ylabel("Stress (MPa)")
plt.title("Effective hoop stress")
#plt.savefig("seff_tt.pdf")
save_to_file("seff_tt_one_hour.csv", seff_tt_one_hour)
save_to_file("seff_tt_one_day.csv", seff_tt_one_day)
save_to_file("seff_tt_one_month.csv", seff_tt_one_month)
save_to_file("seff_tt_five_years.csv", seff_tt_five_years)
sys.exit(0)
| nuclear-wizard/moose | modules/porous_flow/examples/thm_example/paper_solns.py | Python | lgpl-2.1 | 25,361 | [
"MOOSE"
] | 1887ba692c015c0cc5ab3fc42bdfb72cdea63073cbfba5303a3c2afdf1b07026 |
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import espressomd._system as es
import espressomd
from espressomd import thermostat
from espressomd import code_info
from espressomd import integrate
import numpy
print("""
=======================================================
=                    lj_liquid.py                     =
=======================================================
Program Information:""")
print(code_info.features())
dev = "cpu"
# System parameters
#############################################################
# 10 000 Particles
box_l = 10.7437
density = 0.7
# Interaction parameters (repulsive Lennard Jones)
#############################################################
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 1.12246
lj_cap = 20
# Integration parameters
#############################################################
system = espressomd.System()
system.time_step = 0.01
system.skin = 0.4
#es._espressoHandle.Tcl_Eval('thermostat langevin 1.0 1.0')
system.thermostat.set_langevin(kT=1.0, gamma=1.0)
# warmup integration (with capped LJ potential)
warm_steps = 100
warm_n_times = 30
# do the warmup until the particles have at least the distance min__dist
min_dist = 0.9
# integration
int_steps = 1000
int_n_times = 5
#############################################################
#  Setup System                                             #
#############################################################
# Interaction setup
#############################################################
system.box_l = [box_l, box_l, box_l]
system.non_bonded_inter[0, 0].lennard_jones.set_params(
    epsilon=lj_eps, sigma=lj_sig,
    cutoff=lj_cut, shift="auto")
system.non_bonded_inter.set_force_cap(lj_cap)
print("LJ-parameters:")
print(system.non_bonded_inter[0, 0].lennard_jones.get_params())
# Particle setup: place n_part particles at uniformly random positions
#############################################################
volume = box_l * box_l * box_l
n_part = int(volume * density)
for i in range(n_part):
    system.part.add(id=i, pos=numpy.random.random(3) * system.box_l)
# NOTE(review): return value of distto() is discarded -- confirm whether this
# call is needed at all or its result was meant to be printed
system.analysis.distto(0)
print("Simulate {} particles in a cubic simulation box {} at density {}."
      .format(n_part, box_l, density).strip())
print("Interactions:\n")
act_min_dist = system.analysis.mindist()
print("Start with minimal distance {}".format(act_min_dist))
system.max_num_cells = 2744
#############################################################
#  Warmup Integration                                       #
#############################################################
# open Observable file
obs_file = open("pylj_liquid.obs", "w")
obs_file.write("# Time\tE_tot\tE_kin\tE_pot\n")
# (Tcl original, kept for reference:)
# set obs_file [open "$name$ident.obs" "w"]
# puts $obs_file "\# System: $name$ident"
# puts $obs_file "\# Time\tE_tot\tE_kin\t..."
print("""
Start warmup integration:
At maximum {} times {} steps
Stop if minimal distance is larger than {}
""".strip().format(warm_n_times, warm_steps, min_dist))
# set LJ cap
lj_cap = 20
system.non_bonded_inter.set_force_cap(lj_cap)
print(system.non_bonded_inter[0, 0].lennard_jones)
# Warmup Integration Loop: integrate with a capped potential, gradually
# raising the cap, until particles are at least min_dist apart
i = 0
while (i < warm_n_times and act_min_dist < min_dist):
    integrate.integrate(warm_steps)
    # Warmup criterion
    act_min_dist = system.analysis.mindist()
#    print("\rrun %d at time=%f (LJ cap=%f) min dist = %f\r" % (i,system.time,lj_cap,act_min_dist), end=' ')
    i += 1
#   write observables
#    puts $obs_file "{ time [setmd time] } [analyze energy]"
#   Increase LJ cap
    lj_cap = lj_cap + 10
    system.non_bonded_inter.set_force_cap(lj_cap)
# Just to see what else we may get from the c code
print("""
ro variables:
cell_grid {0.cell_grid}
cell_size {0.cell_size}
local_box_l {0.local_box_l}
max_cut {0.max_cut}
max_part {0.max_part}
max_range {0.max_range}
max_skin {0.max_skin}
n_nodes {0.n_nodes}
n_part {0.n_part}
n_part_types {0.n_part_types}
periodicity {0.periodicity}
transfer_rate {0.transfer_rate}
verlet_reuse {0.verlet_reuse}
""".format(system))
# write parameter file
# polyBlockWrite "$name$ident.set" {box_l time_step skin} ""
set_file = open("pylj_liquid.set", "w")
set_file.write("box_l %s\ntime_step %s\nskin %s\n" %
               (box_l, system.time_step, system.skin))
#############################################################
#      Integration                                          #
#############################################################
print("\nStart integration: run %d times %d steps" % (int_n_times, int_steps))
# remove force capping
lj_cap = 0
system.non_bonded_inter.set_force_cap(lj_cap)
print(system.non_bonded_inter[0, 0].lennard_jones)
# print initial energies
energies = system.analysis.energy()
print(energies)
j = 0
# production loop: integrate, then record energies and linear momentum
for i in range(0, int_n_times):
    print("run %d at time=%f " % (i, system.time))
#    es._espressoHandle.Tcl_Eval('integrate %d' % int_steps)
    integrate.integrate(int_steps)
    energies = system.analysis.energy()
    print(energies)
    obs_file.write('{ time %s } %s\n' % (system.time, energies))
    linear_momentum = system.analysis.analyze_linear_momentum()
    print(linear_momentum)
#   write observables (Tcl original, kept for reference)
#    set energies [analyze energy]
#    puts $obs_file "{ time [setmd time] } $energies"
#    puts -nonewline "temp = [expr [lindex $energies 1 1]/(([degrees_of_freedom]/2.0)*[setmd n_part])]\r"
#    flush stdout
#   write intermediate configuration
#    if { $i%10==0 } {
#   polyBlockWrite "$name$ident.[format %04d $j]" {time box_l} {id pos type}
#   incr j
#    }
# write end configuration
end_file = open("pylj_liquid.end", "w")
end_file.write("{ time %f } \n { box_l %f }\n" % (system.time, box_l))
end_file.write("{ particles {id pos type} }")
for i in range(n_part):
    end_file.write("%s\n" % system.part[i].pos)
# id & type not working yet
obs_file.close()
set_file.close()
end_file.close()
# es._espressoHandle.die()
# terminate program
print("\nFinished.")
| tbereau/espresso | samples/python/lj_liquid.py | Python | gpl-3.0 | 6,659 | [
"ESPResSo"
] | 5fa6678785c091eba73483590f8a7e23df5206bac8512abebeffe03d2db76ae0 |
#Script to run at set intervals to check on the status of condor jobs, submit them, and collate results if necessary
from web_frontend.condor_copasi_db import models
from web_frontend.copasi.model import CopasiModel
from web_frontend import settings, condor_log, condor_status, email_notify
import subprocess, os, re, datetime
import logging
def condor_submit(condor_file, username=None, results=False):
    """Submit the .job file condor_file to the condor system using the condor_submit command.

    condor_file must be an absolute path to the condor job filename. If
    settings.SUBMIT_WITH_USERNAMES is set, the job is submitted via sudo as
    'username' after re-owning the associated .cps model file; pass
    results=True when submitting a results-processing job that has no
    associated model file (the re-owning step is then skipped).

    Returns the integer condor cluster id of the submitted job, or -1 if the
    submission output could not be parsed (i.e. the submit failed).
    """
    (directory, filename) = os.path.split(condor_file)
    if not settings.SUBMIT_WITH_USERNAMES:
        p = subprocess.Popen([settings.CONDOR_SUBMIT_LOCATION, condor_file], stdout=subprocess.PIPE, cwd=directory)
    else:
        #Use sudo to submit with the job's user as username instead of condor-copasi-daemon username
        #First, though, we need to change the ownership of the copasi file we're submitting along with the job
        #We can't use chown, because we're not superuser
        #Instead, because we have write access to the file, we can copy it, delete the original, and move the copy back to the original filename
        #If we're processing the SS results file, skip this step.
        if not results:
            #If the job is auto_condor_0.job, the corresponding copasi file will be auto_copasi_0.cps
            job_re = re.compile(r'auto_condor_(?P<name>.+).job')
            # NOTE(review): if filename does not match this pattern, match()
            # returns None and the next line raises AttributeError -- callers
            # appear to rely on their own try/except for this. TODO confirm.
            name = job_re.match(filename).group('name')
            copasi_filename = 'auto_copasi_' + name + '.cps'
            #Copy the copasi file to a temp file; the copy is owned by 'username'
            subprocess.check_call(['sudo', '-u', username, settings.CP_LOCATION, '--preserve=mode', os.path.join(directory, copasi_filename), os.path.join(directory, copasi_filename + '.tmp')])
            #Remove the original copasi file
            subprocess.check_call(['sudo', '-u', username, settings.RM_LOCATION, '-f', os.path.join(directory, copasi_filename)])
            #Rename the temp file back to the original name
            subprocess.check_call(['sudo', '-u', username, settings.MV_LOCATION, os.path.join(directory, copasi_filename + '.tmp'), os.path.join(directory, copasi_filename)])
            #Doublecheck we have group write permissions
            subprocess.check_call(['sudo', '-u', username, settings.CHMOD_LOCATION, 'g+w', os.path.join(directory, copasi_filename)])
        #Finally, we can run condor_submit
        p = subprocess.Popen(['sudo', '-u', username, settings.CONDOR_SUBMIT_LOCATION, condor_file], stdout=subprocess.PIPE, cwd=directory)
    process_output = p.communicate()[0]
    #Parse the cluster id out of the condor_submit output, which ends with a
    #line of the form "1 job(s) submitted to cluster 42."
    try:
        r = re.compile(r'[\s\S]*submitted to cluster (?P<id>\d+).*')
        process_id = int(r.match(process_output).group('id'))
    except Exception:
        #Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are
        #not silently swallowed here.
        process_id = -1  #Return -1 if for some reason the submit failed
        logging.exception('Failed to submit job')
    #TODO: Should we sleep here for a bit? 1s? 10s?
    return process_id
def condor_rm(queue_id, username=None):
    """Remove job queue_id from the Condor queue via condor_rm.

    When SUBMIT_WITH_USERNAMES is enabled the command runs through sudo as
    'username' (the user who owns the job); otherwise it runs directly as the
    daemon user and we wait for it to finish.
    """
    queue_ref = str(queue_id)
    if settings.SUBMIT_WITH_USERNAMES:
        #check_call waits for completion and raises on a nonzero exit status
        subprocess.check_call(
            ['sudo', '-u', username, settings.CONDOR_RM_LOCATION, queue_ref])
    else:
        remover = subprocess.Popen([settings.CONDOR_RM_LOCATION, queue_ref])
        remover.communicate()
def zip_up_dir(job):
    """Create a tar.bz2 archive of the job directory.

    The archive is written as <job dir>/<job name>.tar.bz2 with all entries
    rooted at the job name. If the archive already exists this is a no-op.
    NOTE(review): the archive lives inside the directory being archived, so
    the (partial) archive file itself ends up listed in the tarball.
    """
    import tarfile
    filename = os.path.join(job.get_path(), str(job.name) + '.tar.bz2')
    if os.path.isfile(filename):
        return
    tar = tarfile.open(name=filename, mode='w:bz2')
    try:
        #Add the whole job directory recursively, archived under job.name
        tar.add(job.get_path(), job.name)
    finally:
        #Close even if add() raises, so the file handle is never leaked
        tar.close()
def run():
    """One full daemon pass over the Condor-COPASI job database.

    Stages, in order: (1) submit newly-confirmed jobs to Condor; (2) poll
    condor_q and update per-CondorJob queue statuses; (3) resolve jobs that
    vanished from the queue by reading their Condor log files; (4) roll child
    CondorJob states up into their parent Job; (5) collate results for
    finished jobs (per job type); (6) back-fill run times for legacy
    (pre-0.5) jobs; (7) purge old, unconfirmed and deleted jobs; (8) refresh
    the cached Condor pool status.

    NOTE(review): the indentation of this function was reconstructed from a
    whitespace-mangled source; verify the nesting against the original file.
    """
    #Set up logging, with the appropriate log level
    logging.basicConfig(filename=settings.LOG_FILE,level=settings.LOG_LEVEL, format='%(asctime)s::%(levelname)s::%(message)s', datefmt='%Y-%m-%d, %H:%M:%S')
    #Step one, load the jobs that have been confirmed, and need submitting to condor :
    new_jobs = models.Job.objects.filter(status='N')
    for job in new_jobs:
        logging.debug('New job found: ' + str(job.id) + ', user: ' + str(job.user))
        try:
            #Load the model
            model = CopasiModel(job.get_filename(), job=job)
            #Prepare the .job files
            #Check the job rank. If it's been set, use it. Otherwise set to 0
            # NOTE(review): this condition is always true ('or' should almost
            # certainly be 'and'), so the '0' default branch is unreachable.
            if job.rank != None or job.rank != '':
                rank = job.rank
            else:
                rank = '0'
            #Dispatch on job type; each branch prepares the per-type Condor
            #spec files and returns their descriptions.
            if job.job_type == 'SO':
                condor_jobs = model.prepare_so_condor_jobs(rank=rank)
            elif job.job_type == 'SS':
                no_of_jobs = model.prepare_ss_task(job.runs, skip_load_balancing=job.skip_load_balancing)
                condor_jobs = model.prepare_ss_condor_jobs(no_of_jobs, rank=rank)
            elif job.job_type == 'PS':
                no_of_jobs = model.prepare_ps_jobs(skip_load_balancing=job.skip_load_balancing)
                condor_jobs = model.prepare_ps_condor_jobs(no_of_jobs, rank=rank)
            elif job.job_type == 'OR':
                no_of_jobs = model.prepare_or_jobs(job.runs, skip_load_balancing=job.skip_load_balancing)
                condor_jobs = model.prepare_or_condor_jobs(no_of_jobs, rank=rank)
            elif job.job_type == 'PR':
                no_of_jobs = model.prepare_pr_jobs(job.runs, skip_load_balancing=job.skip_load_balancing, custom_report=job.custom_report)
                condor_jobs = model.prepare_pr_condor_jobs(no_of_jobs, rank=rank)
            elif job.job_type == 'SP':
                ##ALTER
                no_of_jobs = model.prepare_sp_jobs(job.runs, skip_load_balancing = job.skip_load_balancing, custom_report=False)
                condor_jobs = model.prepare_sp_condor_jobs(no_of_jobs, rank=rank)
            elif job.job_type == 'OD':
                #No need to prepare the job. This was done as the job was submitted
                condor_jobs = model.prepare_od_condor_jobs(rank=rank)
            elif job.job_type == 'RW':
                no_of_jobs = model.prepare_rw_jobs(job.runs)
                condor_jobs = model.prepare_rw_condor_jobs(no_of_jobs, job.raw_mode_args, rank=rank)
            else:
                #Unknown job type: leave the job untouched
                continue
            #Submit each prepared spec file and record a CondorJob row for it
            for cj in condor_jobs:
                try:
                    condor_job_id = condor_submit(cj['spec_file'], username=str(job.user.username))
                    #Check that the condor job was submitted successfully
                    assert condor_job_id != -1
                    condor_job = models.CondorJob(parent=job, spec_file=cj['spec_file'], std_output_file=cj['std_output_file'], std_error_file = cj['std_error_file'], log_file=cj['log_file'], job_output=cj['job_output'], queue_status='Q', queue_id=condor_job_id)
                    condor_job.save()
                    #Keep a running count of child condor jobs on the parent
                    if job.condor_jobs == None:
                        job.condor_jobs = 1
                    else:
                        job.condor_jobs += 1
                except:
                    logging.exception('Error submitting job(s) to Condor; ensure condor scheduler service is running. Job: ' + str(job.id) + ', User: ' + str(job.user))
                    raise
            logging.debug('Submitted ' + str(len(condor_jobs)) + ' to Condor')
            job.status = 'S'
            job.last_update=datetime.datetime.today()
            job.save()
        except Exception, e:
            #Any failure above marks the whole job as errored; the directory
            #is archived and the user notified (both best-effort)
            logging.warning('Error preparing job for condor submission. Job: ' + str(job.id) + ', User: ' + str(job.user))
            logging.exception('Exception: ' + str(e))
            job.status = 'E'
            job.last_update=datetime.datetime.today()
            job.finish_time=datetime.datetime.today()
            try:
                zip_up_dir(job)
            except:
                logging.exception('Exception: could not zip up job directory for job ' + str(job.id))
            job.save()
            try:
                email_notify.send_email(job)
            except:
                logging.exception('Exception: error sending email')
    ############
    #Step two, go through the condor_q output and update the status of our condor jobs
    try:
        condor_q_process = subprocess.Popen(settings.CONDOR_Q_LOCATION, stdout=subprocess.PIPE)
        condor_q_output = condor_q_process.communicate()[0].splitlines()
        #Process the output using regexps. Example line is as follows:
        # ID      OWNER            SUBMITTED     RUN_TIME ST PRI SIZE CMD
        #18756.0   ed              1/7  11:45   0+03:19:53 R  0   22.0 CopasiSE.$$(OpSys)
        condor_q=[]
        #condor_q prints 6 header/footer lines; only parse when at least one
        #job line is present
        no_of_jobs = len(condor_q_output) - 6
        if no_of_jobs > 0:
            job_string = r'\s*(?P<id>\d+)\.0\s+(?P<owner>\S+)\s+(?P<sub_date>\S+)\s+(?P<sub_time>\S+)\s+(?P<run_time>\S+)\s+(?P<status>\w)\s+(?P<pri>\d+)\s+(?P<size>\S+)\s+(?P<cmd>\S+)'
            job_re = re.compile(job_string)
            for job_listing in condor_q_output:
                match = job_re.match(job_listing)
                if match:
                    id = match.group('id')
                    status = match.group('status')
                    condor_q.append((id,status))
        #Now, go through all jobs that, at the last update, had been submitted (status = 'Q'|'R'|'H'|'I')
        submitted_jobs = models.CondorJob.objects.filter(queue_status='Q') | models.CondorJob.objects.filter(queue_status='R') | models.CondorJob.objects.filter(queue_status='I') | models.CondorJob.objects.filter(queue_status='H')
        if len(submitted_jobs) > 0:
            logging.debug('Checking condor_q status. ' + str(len(submitted_jobs)) + ' running jobs may be in queue')
        #Check to see if each of these jobs is in the condor_q output
        for submitted_job in submitted_jobs:
            found=False
            for id, status in condor_q:
                #if so, update the status
                if id == str(submitted_job.queue_id):
                    found = True
                    submitted_job.queue_status = status
                    submitted_job.save()
                    break
            #If the job is not in the queue, we put it into an unknown state, until we can check it's log file fully to determine if it's finished running
            if not found:
                submitted_job.queue_status = 'U'
                submitted_job.save()
    except Exception, e:
        logging.error('Error processing condor_q output. Ensure the condor scheduler service is running')
        logging.error('Exception: ' + str(e))
    ############
    #Now, for each CondorJob with a status 'U', read the log file. If the log file says that the job terminated successfully, then mark the job as 'F'; if it terminated unsuccessfully, then mark it as a 'U'. If we can't determine that the job terminated yet, then leave it as 'U'
    unknown_jobs = models.CondorJob.objects.filter(queue_status='U')
    for condor_job in unknown_jobs:
        job = condor_job.parent
        logging.debug('Checking unknown job ' + str(condor_job.queue_id))
        try:
            filename=os.path.join(condor_job.getDirectory(), condor_job.log_file)
            log = condor_log.Log(filename)
            if log.has_terminated:
                #Terminated: 'F' on a zero exit status, otherwise 'E'
                if log.termination_status == 0:
                    condor_job.queue_status = 'F'
                else:
                    condor_job.queue_status = 'E'
                condor_job.save()
            #If not terminated, leave as 'U'; do nothing
            else:
                logging.debug('Condor job ' + str(condor_job.queue_id) + ' has not terminated yet. Leaving status as unknown.') #Write to the log. Jobs with unknown status could indicate problems with Condor. Could consider emailing administrator if this happens...
        except Exception, e:
            logging.error('Could not verify job successfully completed. Job ' + str(condor_job.id))
            logging.error('Exception: ' + str(e))
            condor_job.queue_status = 'E'
            condor_job.save()
    ###############
    #Go through each of the model.Jobs with status 'S' (submitted) or 'X'(processing data on condor), and look at each of its child CondorJobs. If all have finished, mark the Job as 'W' (finished, waiting for processing). If any CondorJobs have been held ('H'), mark the Job as Error ('E')
    submitted_jobs = models.Job.objects.filter(status='S') | models.Job.objects.filter(status='X')
    for job in submitted_jobs:
        try:
            condor_jobs = models.CondorJob.objects.filter(parent=job)
            error=False;
            still_running=False
            for condor_job in condor_jobs:
                logging.debug('Condor job: ' + str(condor_job) + ', status: ' + condor_job.queue_status)
                still_running_stats = ['N', 'Q', 'R', 'I', 'D', 'U']
                if condor_job.queue_status in still_running_stats:
                    still_running = True
                    break
                elif condor_job.queue_status == 'H':
                    logging.warning('Condor job id ' + str(condor_job.queue_id) + ' held')
                    error = True
                    break
                elif condor_job.queue_status == 'E':
                    # NOTE(review): message says 'held' but this is the
                    # error ('E') branch -- looks like a copy-paste slip
                    logging.warning('Condor job id ' + str(condor_job.queue_id) + ' held')
                    error = True
                    break
            if error:
                logging.warning('Job: ' + str(job.id) + ', User: ' + str(job.user) + ' did not complete successfully')
                job.status='E'
                job.finish_time=datetime.datetime.today()
                job.last_update=datetime.datetime.today()
                try:
                    zip_up_dir(job)
                except:
                    logging.exception('Exception: could not zip up job directory for job ' + str(job.id))
                job.save()
                try:
                    email_notify.send_email(job)
                except:
                    logging.exception('Exception: error sending email')
            elif not still_running:
                logging.debug('Job ' + str(job.id) + ', User: ' + str(job.user) + ' finished processing on condor')
                #Open the log file and check the exit status
                failed_job_count = 0
                #keep a count of the total run time for the job
                total_run_time = 0.0
                for condor_job in condor_jobs:
                    try:
                        filename=os.path.join(condor_job.getDirectory(), condor_job.log_file)
                        log = condor_log.Log(filename)
                        assert log.termination_status == 0
                        #While we're here, update the CondorJob run time
                        condor_job.run_time = log.running_time_in_days
                        condor_job.save()
                        total_run_time += condor_job.run_time
                    except:
                        failed_job_count += 1
                job.run_time = total_run_time
                #Now, depending on the type of job, mark it as either 'error' nor not.
                #For SS task, we require all jobs to have finished successfully
                if failed_job_count > 0 and job.job_type == 'SS':
                    logging.exception('Condor job exited with nonzero return value. Condor Job: ' + str(condor_job.queue_id) + ', Job: ' + str(job.id) + ', User: ' + str(job.user))
                    job.status = 'E'
                    job.finish_time=datetime.datetime.today()
                    job.last_update=datetime.datetime.today()
                    try:
                        zip_up_dir(job)
                    except:
                        logging.exception('Exception: could not zip up job directory for job ' + str(job.id))
                    job.save()
                    try:
                        email_notify.send_email(job)
                    except:
                        logging.exception('Exception: error sending email')
                #TODO: what about other jobs?
                if job.status == 'X':
                    #If the second stage of condor processing has finished, mark the job as complete
                    #Open the log file and check the exit status for the results job first
                    try:
                        logging.debug('Checking results file')
                        filename=os.path.join(job.get_path(), 'results.log')
                        log = condor_log.Log(filename)
                        assert log.termination_status == 0
                        #While we're here, update the CondorJob run time
                        job.run_time += log.running_time_in_days
                        job.finish_time=datetime.datetime.today()
                        job.last_update=datetime.datetime.today()
                        job.status='C'
                        job.save()
                    except:
                        logging.exception('Condor job (results processing) exited with nonzero return value or could not read run time. Condor Job: ' + str(condor_job.queue_id) + ', Job: ' + str(job.id) + ', User: ' + str(job.user))
                        job.status = 'E'
                        job.finish_time=datetime.datetime.today()
                        job.last_update=datetime.datetime.today()
                        job.save()
                        try:
                            email_notify.send_email(job)
                        except:
                            logging.exception('Exception: error sending email')
                    #Archive the job directory whether the results step
                    #succeeded or failed
                    try:
                        zip_up_dir(job)
                    except:
                        logging.exception('Exception: could not zip up job directory for job ' + str(job.id))
                elif job.status != 'E':
                    #Otherwise mark it as waiting for local processing
                    job.status = 'W'
                    job.last_update=datetime.datetime.today()
                    #job.finish_time=datetime.datetime.today()
                    job.save()
            else:
                #Still running: just touch the timestamp
                job.last_update=datetime.datetime.today()
                job.save()
        except Exception, e:
            logging.warning('Error preparing job for condor submission. Job: ' + str(job.id) + ', User: ' + str(job.user))
            logging.warning('Exception: ' + str(e))
            try:
                zip_up_dir(job)
            except:
                logging.exception('Exception: could not zip up job directory for job ' + str(job.id))
    ############
    #Collate the results
    #Get the list of jobs marked as finished, waiting for processing
    waiting = models.Job.objects.filter(status='W')
    for job in waiting:
        logging.debug('Processing results for complete job ' + str(job.id) + ', User: ' + str(job.user))
        try:
            model = CopasiModel(job.get_filename())
            if job.job_type == 'SO':
                #TODO: doesn't do validation step yet. This step should probably be condorised.
                #Mark the job as complete
                job.status='C'
                job.finish_time=datetime.datetime.today()
                job.last_update=datetime.datetime.today()
                model.get_so_results(save=True)
                try:
                    zip_up_dir(job)
                except:
                    logging.exception('Exception: could not zip up job directory for job ' + str(job.id))
                job.save()
                try:
                    email_notify.send_email(job)
                except:
                    logging.exception('Exception: error sending email')
            elif job.job_type == 'SS':
                #Collate the results, and ship them off in a new condor job to be averaged
                #Use this to keep track of the number of jobs we split the task in to
                condor_jobs = models.CondorJob.objects.filter(parent=job)
                cj = model.prepare_ss_process_job(len(condor_jobs), job.runs, rank=job.rank)
                #Submit the new job to condor
                condor_job_id = condor_submit(cj['spec_file'], username=str(job.user.username), results=True)
                #And store a new condor job in the database
                condor_job = models.CondorJob(parent=job, spec_file=cj['spec_file'], std_output_file=cj['std_output_file'], std_error_file = cj['std_error_file'], log_file=cj['log_file'], job_output=cj['job_output'], queue_status='Q', queue_id=condor_job_id)
                condor_job.save()
                job.status='X' # Set the job status as processing on condor
                #Update the condor job count
                if job.condor_jobs == None:
                    job.condor_jobs = 1
                else:
                    job.condor_jobs += 1
                job.last_update=datetime.datetime.today()
                job.save()
                try:
                    email_notify.send_email(job)
                except:
                    logging.exception('Exception: error sending email')
            elif job.job_type == 'PS':
                condor_jobs = models.CondorJob.objects.filter(parent=job)
                no_of_jobs = len(condor_jobs)
                model.process_ps_results(no_of_jobs)
                job.status = 'C'
                job.last_update = datetime.datetime.today()
                job.finish_time = datetime.datetime.today()
                try:
                    zip_up_dir(job)
                except:
                    logging.exception('Exception: could not zip up job directory for job ' + str(job.id))
                job.save()
                try:
                    email_notify.send_email(job)
                except:
                    logging.exception('Exception: error sending email')
            elif job.job_type == 'OR':
                condor_jobs = models.CondorJob.objects.filter(parent=job)
                no_of_jobs = len(condor_jobs)
                #TODO: Do we need to collate any output files?
                model.process_or_results(no_of_jobs)
                job.status = 'C'
                job.last_update = datetime.datetime.today()
                job.finish_time = datetime.datetime.today()
                try:
                    zip_up_dir(job)
                except:
                    logging.exception('Exception: could not zip up job directory for job ' + str(job.id))
                job.save()
                try:
                    email_notify.send_email(job)
                except:
                    logging.exception('Exception: error sending email')
            #ALTER
            elif job.job_type == 'SP':
                condor_jobs = models.CondorJob.objects.filter(parent=job)
                no_of_jobs = len(condor_jobs)
                model.process_sp_results(no_of_jobs, custom_report=job.custom_report)
                job.status = 'C'
                job.last_update = datetime.datetime.today()
                job.finish_time = datetime.datetime.today()
                try:
                    zip_up_dir(job)
                except:
                    logging.exception('Exception: could not zip up job directory for job ' + str(job.id))
                job.save()
                try:
                    email_notify.send_email(job)
                except:
                    logging.exception('Exception: error sending email')
            elif job.job_type == 'PR':
                condor_jobs = models.CondorJob.objects.filter(parent=job)
                no_of_jobs = len(condor_jobs)
                model.process_pr_results(no_of_jobs, custom_report=job.custom_report)
                #Save a copy of the model with the best parameter values
                #Only do this if job.skip_model_generation != False
                if not job.skip_model_generation:
                    model.create_pr_best_value_model('best_values.cps', custom_report=job.custom_report)
                job.status = 'C'
                job.last_update = datetime.datetime.today()
                job.finish_time = datetime.datetime.today()
                try:
                    zip_up_dir(job)
                except:
                    logging.exception('Exception: could not zip up job directory for job ' + str(job.id))
                job.save()
                try:
                    email_notify.send_email(job)
                except:
                    logging.exception('Exception: error sending email')
            elif job.job_type == 'OD':
                condor_jobs = models.CondorJob.objects.filter(parent=job)
                output_files = [cj.job_output for cj in condor_jobs]
                model.process_od_results(output_files)
                job.status = 'C'
                job.last_update = datetime.datetime.today()
                job.finish_time = datetime.datetime.today()
                try:
                    zip_up_dir(job)
                except:
                    logging.exception('Exception: could not zip up job directory for job ' + str(job.id))
                job.save()
                try:
                    email_notify.send_email(job)
                except:
                    logging.exception('Exception: error sending email')
            elif job.job_type == 'RW':
                condor_jobs = models.CondorJob.objects.filter(parent=job)
                no_of_jobs = len(condor_jobs)
                #At the moment, we don't process anything. Just save the jobs as complete
                job.status = 'C'
                job.last_update = datetime.datetime.today()
                job.finish_time = datetime.datetime.today()
                try:
                    zip_up_dir(job)
                except:
                    logging.exception('Exception: could not zip up job directory for job ' + str(job.id))
                job.save()
                try:
                    email_notify.send_email(job)
                except:
                    logging.exception('Exception: error sending email')
        except:
            logging.warning('Error processing results for job ' + str(job.id) + ', User: ' + str(job.user))
            logging.exception('Exception:')
            job.status='E'
            job.finish_time=datetime.datetime.today()
            job.last_update=datetime.datetime.today()
            try:
                zip_up_dir(job)
            except:
                logging.exception('Exception: could not zip up job directory for job ' + str(job.id))
            job.save()
            try:
                email_notify.send_email(job)
            except:
                logging.exception('Exception: error sending email')
    ############
    #Go through all condor jobs with status 'F' and blank run time
    #Then extract the run time from the log and save it to the database
    #This step should only be performed once, after the upgrade from version 0.4
    #to 0.5. From then on, the run time should be automatically stored to the database
    condor_jobs = models.CondorJob.objects.filter(queue_status='F').filter(run_time = None)
    #To save doing another database hit, make a note if we're actually updating
    #any jobs, since most of the time we won't be
    if len(condor_jobs) > 0:
        updated_legacy_jobs = True
    else:
        updated_legacy_jobs = False
    for condor_job in condor_jobs:
        try:
            job = condor_job.parent
            filename=os.path.join(condor_job.getDirectory(), condor_job.log_file)
            log = condor_log.Log(filename)
            condor_job.run_time = log.running_time_in_days
            condor_job.save()
        except:
            logging.error('Error updating legacy job: ' + str(job.id))
            #Fall back to a zero run time so the job is not revisited forever
            try:
                condor_job.run_time = 0.0
                condor_job.save()
            except:
                pass
    if updated_legacy_jobs:
        legacy_jobs = models.Job.objects.filter(status='C').filter(run_time=None)
        for job in legacy_jobs:
            try:
                condor_jobs = models.CondorJob.objects.filter(parent=job)
                #keep a tally of total run time
                total_run_time = 0.0
                for condor_job in condor_jobs:
                    total_run_time += condor_job.run_time
                job.run_time = total_run_time
                job.save()
                logging.debug('Updated run time for legacy job ' + str(job.id))
            except:
                logging.warning('Error calculating total run time for legacy job ' + str(job.id))
    #Now we need to update the condor_jobs field for each job by counting the number of associated condor jobs.
    jobs = models.Job.objects.filter(condor_jobs=None)
    if len(jobs) > 0:
        logging.debug('Jobs found without recorded condor_job count. Updating')
        for job in jobs:
            condor_job_set = models.CondorJob.objects.filter(parent=job)
            condor_jobs = len(condor_job_set)
            job.condor_jobs = condor_jobs
            job.save()
    ############
    #Go through completed jobs or jobs with errors, and remove anything older than settings.COMPLETED_JOB_REMOVAL_DAYS
    complete = models.Job.objects.filter(status='C') | models.Job.objects.filter(status='E')
    if settings.COMPLETED_JOB_REMOVAL_DAYS >0:
        for job in complete:
            try:
                if datetime.datetime.today() - job.finish_time > datetime.timedelta(days=settings.COMPLETED_JOB_REMOVAL_DAYS):
                    logging.debug('Removing old job ' + str(job.id) + ', User: ' + str(job.user))
                    job.delete()
            except Exception, e:
                logging.warning('Error removing old job ' + str(job.id) + ', User: ' + str(job.user))
                logging.warning('Exception: ' + str(e))
    ########
    #Remove any unconfirmed jobs older than 30 mins
    unconfirmed = models.Job.objects.filter(status='U')
    for job in unconfirmed:
        if datetime.datetime.today() - job.submission_time > datetime.timedelta(minutes=30):
            logging.debug('Removing old unconfirmed job ' + str(job.id) + ', User: ' + str(job.user))
            job.delete()
    ##########
    #Remove any jobs marked for deletion
    deletion = models.Job.objects.filter(status='D')
    for job in deletion:
        try:
            #First remove any condor jobs associated with the job
            condor_jobs = models.CondorJob.objects.filter(parent=job)
            for cj in condor_jobs:
                #Jobs still in the queue must be condor_rm'd before deletion
                if cj.queue_status == 'Q' or cj.queue_status == 'R' or cj.queue_status == 'I' or cj.queue_status == 'H':
                    condor_rm(cj.queue_id, job.user.username)
                logging.debug('Removing condor job ' + str(cj.queue_id) + ', User: ' + str(job.user))
                cj.delete()
            logging.debug('Removing job marked for deletion: ' + str(job.id) + ', User: ' + str(job.user))
            job.delete()
        except:
            logging.exception('Error removing marked for deletion job ' + str(job.id))
    ###########
    #Update the condor status
    try:
        condor_status.run()
        #logging.debug('Updated Condor status')
    except:
        logging.exception('Error updating the condor status')
#Allow a single daemon pass to be run directly as a script
if __name__ == '__main__':
    run()
| Nucleoos/condor-copasi | condor-copasi-daemon/background_run.py | Python | artistic-2.0 | 32,291 | [
"COPASI"
] | 7012c866e607bd53fc080ae598e545b1289689154fbb8db0a7ab721bb9793722 |
#get the default AMI's and titles for the 8 EC2 datacenters through endless screen scraping
#please destroy me when EC2 provides this data through JSON ;)
# NOTE(review): indentation below was reconstructed from a whitespace-mangled
# source; verify loop bodies against the original script. Also note the
# header says 8 datacenters but the region list below has 10 entries.
import time, datetime, sys, re
try:
    from splinter import Browser
except:
    sys.exit('install splinter, ./bin/pip install splinter')
# Credentials are intentionally blank; fill them in before running
EC2_USER = ""
EC2_PASS = ""
#user + pass you login on console.aws.amazon.com
URL = 'https://console.aws.amazon.com/'
# Drive a real browser session and sign in to the AWS console
browser = Browser()
browser.visit(URL)
browser.find_by_css('#ap_email').fill(EC2_USER)
browser.find_by_css('#ap_password').fill(EC2_PASS)
browser.find_by_css('#signInSubmit-input').click()
then = datetime.datetime.now()
# Visit the EC2 console for each region and scrape the default AMI listings
for provider in ['eu-central-1', 'us-east-1', 'us-west-2', 'us-west-1', 'eu-west-1', 'ap-southeast-1', 'ap-northeast-1', 'ap-southeast-2','sa-east-1', 'ap-northeast-2']:
    provider_images = []
    browser.visit('https://console.aws.amazon.com/ec2/v2/home?region=%s' % provider)
    time.sleep(10)
    #wait to load otherwise it fails
    browser.find_by_css('.gwt-Button').click()
    time.sleep(1)
    # Collect the AMI listing divs (obfuscated GWT CSS class names)
    divs = browser.find_by_css('.NB')
    divs.extend(browser.find_by_css('.PB'))
    divs.extend(browser.find_by_css('.BC'))
    print ' \'%s\': {' % provider
    for div in divs:
        # Skip Windows AMIs entirely
        if 'Microsoft Windows' in div.value:
            continue
        value = div.value.split('\n')
        # The AMI id line is usually the 4th line of the div text, sometimes
        # the 5th -- presumably depends on the listing layout; TODO confirm
        if 'ami' in value[3]:
            ami_in = value[3]
        else:
            ami_in = value[4]
        name = ami_in.split(' - ')[0]
        # Emit one line per AMI id found, tagged 64-bit / 32-bit / untagged
        a = re.findall(r'ami-\w+\s+\(64-bit\)', ami_in)
        if a:
            ami = a[0].split(' (64-bit)')[0]
            print ' \'%s\': \'%s (64-bit)\',' % (ami, name)
        b = re.findall(r'ami-\w+\s+\(32-bit\)', ami_in)
        if b:
            ami = b[0].split(' (32-bit)')[0]
            print ' \'%s\': \'%s (32-bit)\',' % (ami, name)
        c = re.findall(r'ami-\w+$', ami_in)
        if c:
            print ' \'%s\': \'%s\',' % (c[0], name)
    print ' },'
#lame formating for easy copy paste to ec2_images.EC2_IMAGES dict
now = datetime.datetime.now()
total = (now-then).seconds
print 'took %s seconds\n' % total
| Lao-liu/mist.io | scripts/get_ec2_amis.py | Python | agpl-3.0 | 2,135 | [
"VisIt"
] | a6ceff426abc3bae997ce964bf7c68fb632d8b75dcc7fdc4ec7dd1de14ab749a |
# -*- coding: utf-8 -*-
import random
import chatbot
from commands import general_chat, send_whisper
# Public API of this plugin module
__all__ = [ 'PLUGIN', 'init' ]

# Plugin descriptor read by the plugin loader: plugin name plus the plugins
# it requires / conflicts with.
PLUGIN = {
    'name': 'lazytree',
    'requires': ('chatbot',),
    'blocks': (),
}

# -----------------------------------------------------------------------------

# Greeting template -> weight: higher weight means the greeting is chosen
# more often by say_greeting(). '{0}' is replaced by the player's nick.
greetings = {
    "Hi {0}!" : 4,
    "Hey {0}" : 3,
    "Yo {0}" : 2,
    "{0}!!!!" : 1,
    "{0}!!!" : 1,
    "{0}!!" : 1,
    "Hello {0}" : 5,
    "Hello {0}!" : 5,
    "Welcome back {0}" : 3,
    "Hello {0}! You are looking lovely today!" : 1,
    "{0} is back!!" : 1,
    "Hello and welcome to the Aperture Science \
computer-aided enrichment center." : 1,
}

# Objects randomly dropped on a player's head by drop_on_head()
drop_items = [
    "a bomb", "a bowl of petunias", "a cake", "a candy", "a chocobo",
    "a coin", "a cookie", "a drunken pirate", "a freight train",
    "a fruit", "a mouboo", "an angry cat",
    "an angry polish spelling of a rare element with the atomic number 78",
    "an anvil", "an apple", "an iten", "a magic eightball", "a GM",
    "a whale", "an elephant", "a piano", "a piece of moon rock", "a pin",
    "a rock", "a tub", "a wet mop", "some bass", "Voldemort", "a sandworm",
    "a princess", "a prince", "an idea", "Luvia", "a penguin",
    "The Hitchhiker's Guide to the Galaxy",
]

# Alternative complaints used by drop_on_head() instead of dropping an item
dropping_other = [
    "Hu hu hu.. {0} kicked me!",
    "Ouch..",
    "Ouchy..",
    "*drops dead*",
    "*sighs*",
    "Leaf me alone.",
    "Stop it! I doesn't drop branches, try the Druid tree for once!",
]

# Per-player fixed responses that override the random drop
dropping_special = {
    "ShaiN2" : "*drops a nurse on {0}*",
    "Shainen" : "*drops a nurse on {0}*",
    "Silent Dawn" : "*drops a box of chocolate on {0}*",
    "veryape" : "*drops a chest of rares on {0}*",
    "veryapeGM" : "*drops a chest of rares on {0}*",
    "Ginaria" : "*drops a bluepar on {0}*",
    "Rift Avis" : "*drops an acorn on {0}*",
}

# Retorts used by answer_threat() when someone tells the tree to die
die_answers = [
    "*drops a bomb on {0}'s head*",
    "*drops a bowl of petunias on {0}'s head*",
    "*drops a drunken pirate on {0}'s head*",
    "*drops a freight train on {0}'s head*",
    "*drops a mouboo on {0}'s head*",
    "*drops an angry cat on {0}'s head*",
    "*drops an angry polish spelling of a rare element with \
the atomic number 78 on {0}'s head*",
    "*drops an iten on {0}'s head*",
    "*drops a piano on {0}'s head*",
    "*drops a piece of moon rock on {0}'s head*",
    "*drops Voldemort on {0}'s head*",
    "*drops dead*",
    "*sighs*",
    "Avada Kedavra!",
    "Make me!",
    "Never!!",
    "You die, {0}!",
    "You die, {0}!",
    "You die, {0}!",
    "You die, {0}!",
    "No!",
    "In a minute..",
    "Suuure... I'll get right on it",
]

# Replies to "heal me, tree"
healme_answers = [
    "Eat an apple, they're good for you.",
    "If I do it for you, then I have to do it for everybody.",
    "Oh, go drink a potion or something.",
    "Whoops! I lost my spellbook.",
    "no mana",
]

# Replies to "who/what are you, tree"
whoami_answers = [
    "An undercover GM.",
    "An exiled GM.",
    "I'm not telling you!",
    "I'm a bot! I'll be level 99 one day! Mwahahahaaha!!!111!",
    "Somebody said I'm a Chinese copy of Confused Tree",
    "I am your evil twin.",
    "I don't remember anything after I woke up! What happened to me?",
    "I don't know. Why am I here??",
    "Who are you?",
    "On the 8th day, God was bored and said 'There will be bots'. \
So here I am.",
    "♪ I'm your hell, I'm your dream, I'm nothing in between ♪♪",
    "♪♪ Aperture Science. We do what we must, because.. we can ♪",
    "I'm just a reincarnation of a copy.",
]

# Replies to "tell a joke, tree"
joke_answers = [
    "How did the tree get drunk? On root beer.",
    "Do you think I'm lazy?",
    "I miss Confused Tree :(",
    "I miss CrazyTree :(",
    "I'm not telling you!",
    "*sighs*",
    "If I do it for you, then I have to do it for everybody.",
    "What did the beaver say to the tree? It's been nice gnawing you.",
    "What did the little tree say to the big tree? Leaf me alone.",
    "What did the tree wear to the pool party? Swimming trunks.",
    "What do trees give to their dogs? Treets.",
    "What do you call a tree that only eats meat? Carniforous.",
    "What do you call a tree who's always envious? Evergreen.",
    "What is the tree's least favourite month? Sep-timber!",
    "What kind of tree can fit into your hand? A palm-tree.",
    "What was the tree's favorite subject in school? Chemistree.",
    "Why did the leaf go to the doctor? It was feeling green.",
    "Why doesn't the tree need sudo? Because it has root.",
    "Why was the cat afraid of the tree? Because of its bark.",
    "Why was the tree executed? For treeson.",
    "How do trees get on the internet? They log in.",
    "Why did the pine tree get into trouble? Because it was being knotty.",
    "Did you hear the one about the oak tree? It's a corn-y one!",
    "What do you call a blonde in a tree with a briefcase? Branch Manager.",
    "How is an apple like a lawyer? They both look good hanging from a tree.",
    "Why did the sheriff arrest the tree? Because its leaves rustled.",
    "I'm to tired, ask someone else.",
    "If you are trying to get me to tell jokes you are barking \
up the wrong tree!",
    "You wodden think they were funny anyhow. Leaf me alone!",
    "What is brown and sticky? A stick.",
]

# Replies when someone burns the tree ('%%c' etc. are client emote codes --
# presumably rendered as smileys by the game client; TODO confirm)
burn_answers = [
    "*curses {0} and dies %%c*",
    "Help! I'm on fire!",
    "Oh hot.. hot hot!",
    "*is glowing*",
    "*is flaming*",
    "ehemm. where are firefighters? I need them now!",
    "*is so hot!*",
]

# Generic filler replies when no better match applies
noidea_answers = [
    "what?", "what??", "whatever", "hmm...", "huh?", "*yawns*",
    "Wait a minute..", "What are you talking about?",
    "Who are you?", "What about me?",
    "I don't know what you are talking about",
    "Excuse me?", "very interesting", "really?",
    "go on...", "*scratches its leafy head*",
    "*feels a disturbance in the force*",
    "*senses a disturbance in the force*",
    "*humming*", "I'm bored..", "%%j", "%%U", "%%[",
]

# Replies when the tree is hurt by one of the hurt_actions verbs
pain_answers = [ "Ouch..", "Ouchy..", "Argh..", "Eckk...", "*howls*",
                 "*screams*", "*groans*", "*cries*", "*faints*", "%%k",
                 "Why.. What did I do to you? %%i" ]

# Verbs that count as hurting the tree (used to build a trigger regex below)
hurt_actions = [ "eat", "shoot", "pluck", "torture", "slap", "poison",
                 "break", "stab", "throw" ]

# Players the tree never responds to
ignored_players = []

# Nicks allowed to use the whispered !additem / !addjoke admin commands
tree_admins = [ 'TestChar2' ]
# -----------------------------------------------------------------------------
def say_greeting(nick, _, is_whisper, match):
    """Greet a player in public chat with a weighted-random greeting.

    Greetings and their relative weights come from the module-level
    'greetings' dict; '{0}' in a greeting is replaced by the player's nick.
    Whispers and players on the ignore list get no response.
    """
    if is_whisper:
        return
    if nick in ignored_players:
        return
    total_weight = sum(greetings.itervalues())
    # Standard cumulative-weight selection: pick a threshold in
    # [1, total_weight], then take the first greeting whose cumulative
    # weight reaches it. (The previous version checked the cumulative sum
    # *before* adding each weight against randint(0, total), which shifted
    # every entry's effective weight and could fall through to a fallback
    # string that is not in the greetings table at all.)
    threshold = random.randint(1, total_weight)
    cumulative = 0
    chosen = 'Hi {0}'  # defensive fallback; unreachable while greetings is non-empty
    for greeting, weight in greetings.iteritems():
        cumulative += weight
        if cumulative >= threshold:
            chosen = greeting
            break
    general_chat(chosen.format(nick))
def drop_on_head(nick, _, is_whisper, match):
    """Answer a shake/kick of the tree by dropping something on the shaker.

    Players listed in ``dropping_special`` always get their personal reply;
    everyone else gets either a random item drop or a canned line.
    """
    if is_whisper or nick in ignored_players:
        return
    if nick in dropping_special:
        reply = dropping_special[nick]
    else:
        pick = random.randint(0, len(drop_items) + len(dropping_other))
        if pick < len(drop_items):
            reply = "*drops {} on {}'s head*".format(drop_items[pick], nick)
        else:
            reply = random.choice(dropping_other)
    general_chat(reply.format(nick))
def answer_threat(nick, _, is_whisper, match):
    """React to a kill/nuke threat with a random retort from die_answers."""
    if is_whisper or nick in ignored_players:
        return
    general_chat(random.choice(die_answers).format(nick))
# -----------------------------------------------------------------------------
def admin_additem(nick, _, is_whisper, match):
    """Whisper-only admin command: append a new droppable item.

    Silently ignores the command unless it arrives as a whisper from a
    nick in ``tree_admins``.  The confirmation is sent even when the item
    was already present (idempotent add).
    """
    if not is_whisper or nick not in tree_admins:
        return
    item = match.group(1)
    if item not in drop_items:
        drop_items.append(item)
    send_whisper(nick, "Added item '{}' to drop list".format(item))
def admin_addjoke(nick, _, is_whisper, match):
    """Whisper-only admin command: append a new joke to joke_answers.

    Mirrors admin_additem: only honored for whispers from tree_admins,
    and duplicate jokes are not added twice.
    """
    if not is_whisper or nick not in tree_admins:
        return
    joke = match.group(1)
    if joke not in joke_answers:
        joke_answers.append(joke)
    send_whisper(nick, "Added joke")
# -----------------------------------------------------------------------------
# Chat trigger table: regex -> handler.  Handlers here are either a callable
# (invoked as handler(nick, msg, is_whisper, match)) or a bare list of canned
# answers (burn_answers, joke_answers, ...).  NOTE(review): list-valued
# entries presumably mean "pick a random answer" in the chatbot dispatcher --
# confirm against chatbot.commands' consumer, since this module never calls
# these values itself.
tree_commands = {
    r'^(hello|hi|hey|heya|hiya|yo) (tree|LazyTree)' : say_greeting,
    r'^(hello|hi|hey|heya|hiya) (all|everybody|everyone)$' : say_greeting,
    r'\*?((shake|kick)s?) (tree|LazyTree)' : drop_on_head,
    r'(die|\*?((nuke|kill)s?)) (tree|LazyTree)' : answer_threat,
    r'\*?(burn(s?)) (tree|LazyTree)' : burn_answers,
    r'^tell (.*)joke([ ,]{1,2})tree' : joke_answers,
    r'^heal me([ ,]{1,2})tree' : healme_answers,
    r'^(who|what) are you([ ,]{1,3})tree' : whoami_answers,
    r'\*?(' + '|'.join(hurt_actions) + ')s? (tree|LazyTree)' : pain_answers,
    r'^!additem (.*)' : admin_additem,
    r'^!addjoke (.*)' : admin_addjoke,
}
def init(config):
    """Plugin entry point: register this plugin's chat triggers.

    ``config`` is accepted to satisfy the plugin interface but is unused.
    """
    chatbot.commands.update(tree_commands)
| mekolat/manachat | plugins/lazytree.py | Python | gpl-2.0 | 9,027 | [
"Galaxy"
] | cd16cd34d6de0edd5e556fdd5db436032070d9b5cda2114a36ddc26f37296c4d |
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ThreadPoolExecutor."""
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
import atexit
from concurrent.futures import _base
import itertools
import queue
import threading
import weakref
import os
# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
# - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.
# Registry of live worker threads -> their work queues.  Weak keys let
# entries vanish automatically once a thread object is garbage-collected.
_threads_queues = weakref.WeakKeyDictionary()
# Flipped to True by the atexit hook so idle workers know to exit.
_shutdown = False
def _python_exit():
    """atexit hook: wake every worker with a sentinel, then join them all."""
    global _shutdown
    _shutdown = True
    threads_and_queues = list(_threads_queues.items())
    # First pass: one None sentinel per thread so each can observe shutdown.
    for _, work_queue in threads_and_queues:
        work_queue.put(None)
    # Second pass: wait for the workers to actually finish.
    for thread, _ in threads_and_queues:
        thread.join()
atexit.register(_python_exit)
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException as exc:
self.future.set_exception(exc)
# Break a reference cycle with the exception 'exc'
self = None
else:
self.future.set_result(result)
def _worker(executor_reference, work_queue):
    """Worker-thread main loop: run _WorkItems from *work_queue* until told
    to exit.

    executor_reference is a weakref to the owning ThreadPoolExecutor so the
    pool itself can be garbage-collected while workers sit idle; a None on
    the queue is the wake-up sentinel.
    """
    try:
        while True:
            # Blocks until a _WorkItem or the None sentinel arrives.
            work_item = work_queue.get(block=True)
            if work_item is not None:
                work_item.run()
                # Delete references to object. See issue16284
                del work_item
                continue
            # Got the sentinel: dereference the weakref to check liveness.
            executor = executor_reference()
            # Exit if:
            #   - The interpreter is shutting down OR
            #   - The executor that owns the worker has been collected OR
            #   - The executor that owns the worker has been shutdown.
            if _shutdown or executor is None or executor._shutdown:
                # Notice other workers
                work_queue.put(None)
                return
            # Drop the strong reference so the executor stays collectable
            # while this thread blocks on the queue again.
            del executor
    except BaseException:
        _base.LOGGER.critical('Exception in worker', exc_info=True)
class ThreadPoolExecutor(_base.Executor):
    # Used to assign unique thread names when thread_name_prefix is not supplied.
    _counter = itertools.count().__next__
    def __init__(self, max_workers=None, thread_name_prefix=''):
        """Initializes a new ThreadPoolExecutor instance.
        Args:
            max_workers: The maximum number of threads that can be used to
                execute the given calls.
            thread_name_prefix: An optional name prefix to give our threads.
        """
        if max_workers is None:
            # Use this number because ThreadPoolExecutor is often
            # used to overlap I/O instead of CPU work.
            max_workers = (os.cpu_count() or 1) * 5
        if max_workers <= 0:
            raise ValueError("max_workers must be greater than 0")
        self._max_workers = max_workers
        self._work_queue = queue.Queue()
        self._threads = set()
        self._shutdown = False
        self._shutdown_lock = threading.Lock()
        self._thread_name_prefix = (thread_name_prefix or
                                    ("ThreadPoolExecutor-%d" % self._counter()))
    def submit(self, fn, *args, **kwargs):
        # Lock so a concurrent shutdown() cannot slip between the check
        # and the enqueue.
        with self._shutdown_lock:
            if self._shutdown:
                raise RuntimeError('cannot schedule new futures after shutdown')
            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)
            self._work_queue.put(w)
            self._adjust_thread_count()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__
    def _adjust_thread_count(self):
        # When the executor gets lost, the weakref callback will wake up
        # the worker threads.
        def weakref_cb(_, q=self._work_queue):
            q.put(None)
        # TODO(bquinlan): Should avoid creating new threads if there are more
        # idle threads than items in the work queue.
        num_threads = len(self._threads)
        if num_threads < self._max_workers:
            thread_name = '%s_%d' % (self._thread_name_prefix or self,
                                     num_threads)
            # Workers hold only a weakref to self (see module comment) and
            # run as daemons so a forgotten executor cannot hang exit.
            t = threading.Thread(name=thread_name, target=_worker,
                                 args=(weakref.ref(self, weakref_cb),
                                       self._work_queue))
            t.daemon = True
            t.start()
            self._threads.add(t)
            _threads_queues[t] = self._work_queue
    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown = True
            # A single sentinel suffices: each exiting worker re-posts it
            # for the next (see _worker).
            self._work_queue.put(None)
        if wait:
            for t in self._threads:
                t.join()
    shutdown.__doc__ = _base.Executor.shutdown.__doc__
| prefetchnta/questlab | bin/x64bin/python/36/Lib/concurrent/futures/thread.py | Python | lgpl-2.1 | 5,664 | [
"Brian"
] | fdd0148415b07091194349d1fe22fc4b56ba48d8e336524b7bc46c68126e5800 |
# sql/elements.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core sql expression elements, including :class:`.ClauseElement`,
:class:`.ColumnElement`, and derived classes.
"""
from __future__ import unicode_literals
from .. import util, exc, inspection
from . import type_api
from . import operators
from .visitors import Visitable, cloned_traverse, traverse
from .annotation import Annotated
import itertools
from .base import Executable, PARSE_AUTOCOMMIT, Immutable, NO_ARG
from .base import _generative, Generative
import re
import operator
def _clone(element, **kw):
    """Default clone callable used by traversals: shallow-copy *element*.

    Extra keyword arguments are accepted (and ignored) so this matches the
    signature expected by the visitor machinery.
    """
    return element._clone()
def collate(expression, collation):
    """Return the clause ``expression COLLATE collation``.

    e.g.::

        collate(mycolumn, 'utf8_bin')

    produces::

        mycolumn COLLATE utf8_bin
    """
    # The left side is coerced to a bound/column expression; the collation
    # name is rendered as literal SQL text.
    left = _literal_as_binds(expression)
    right = _literal_as_text(collation)
    return BinaryExpression(left, right, operators.collate, type_=left.type)
def between(expr, lower_bound, upper_bound, symmetric=False):
    """Produce a ``BETWEEN`` predicate clause.
    E.g.::
        from sqlalchemy import between
        stmt = select([users_table]).where(between(users_table.c.id, 5, 7))
    Would produce sql resembling::
        SELECT id, name FROM user WHERE id BETWEEN :id_1 AND :id_2
    The :func:`.between` function is a standalone version of the
    :meth:`.ColumnElement.between` method available on all
    sql expressions, as in::
        stmt = select([users_table]).where(users_table.c.id.between(5, 7))
    All arguments passed to :func:`.between`, including the left side
    column expression, are coerced from Python scalar values if a
    the value is not a :class:`.ColumnElement` subclass. For example,
    three fixed values can be compared as in::
        print(between(5, 3, 7))
    Which would produce::
        :param_1 BETWEEN :param_2 AND :param_3
    :param expr: a column expression, typically a :class:`.ColumnElement`
     instance or alternatively a Python scalar expression to be coerced
     into a column expression, serving as the left side of the ``BETWEEN``
     expression.
    :param lower_bound: a column or Python scalar expression serving as the
     lower bound of the right side of the ``BETWEEN`` expression.
    :param upper_bound: a column or Python scalar expression serving as the
     upper bound of the right side of the ``BETWEEN`` expression.
    :param symmetric: if True, will render " BETWEEN SYMMETRIC ". Note
     that not all databases support this syntax.
    .. versionadded:: 0.9.5
    .. seealso::
        :meth:`.ColumnElement.between`
    """
    # Coerce the left side, then delegate to ColumnElement.between(), which
    # handles coercion of the two bounds.
    expr = _literal_as_binds(expr)
    return expr.between(lower_bound, upper_bound, symmetric=symmetric)
def literal(value, type_=None):
    """Return a literal clause, bound to a bind parameter.
    Literal clauses are created automatically when non-
    :class:`.ClauseElement` objects (such as strings, ints, dates, etc.) are
    used in a comparison operation with a :class:`.ColumnElement` subclass,
    such as a :class:`~sqlalchemy.schema.Column` object. Use this function
    to force the generation of a literal clause, which will be created as a
    :class:`BindParameter` with a bound value.
    :param value: the value to be bound. Can be any Python object supported by
     the underlying DB-API, or is translatable via the given type argument.
    :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which
     will provide bind-parameter translation for this literal.
    """
    # unique=True gives the anonymous parameter a distinct name per use so
    # multiple literals in one statement cannot collide.
    return BindParameter(None, value, type_=type_, unique=True)
def type_coerce(expression, type_):
    """Associate a sql expression with a particular type, without rendering
    ``CAST``.

    Unlike :func:`.cast`, no SQL is emitted for the coercion: the given
    :class:`.TypeEngine` simply takes effect for result-row processing and
    for bound-value handling of literals passed here.

    :param expression: A sql expression, such as a :class:`.ColumnElement`
     expression or a Python string which will be coerced into a bound
     literal value.
    :param type_: A :class:`.TypeEngine` class or instance indicating
     the type to which the expression is coerced.

    .. seealso::

        :func:`.cast`
    """
    coerce_type = type_api.to_instance(type_)
    # ORM-level constructs expose __clause_element__(); unwrap and recurse.
    if hasattr(expression, '__clause_element__'):
        return type_coerce(expression.__clause_element__(), coerce_type)
    # Existing bind parameters are cloned with the new type swapped in.
    if isinstance(expression, BindParameter):
        cloned = expression._clone()
        cloned.type = coerce_type
        return cloned
    # Plain Python values become NULL or an anonymous bound literal.
    if not isinstance(expression, Visitable):
        if expression is None:
            return Null()
        return literal(expression, type_=coerce_type)
    # Any other SQL construct is wrapped in an anonymous Label carrying
    # the requested type.
    return Label(None, expression, type_=coerce_type)
def outparam(key, type_=None):
    """Create an 'OUT' parameter for usage in functions (stored procedures),
    for databases which support them.
    The ``outparam`` can be used like a regular function parameter.
    The "output" value will be available from the
    :class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters``
    attribute, which returns a dictionary containing the values.
    """
    # unique=False keeps the caller-supplied key so the value can be looked
    # up by name in ResultProxy.out_parameters.
    return BindParameter(
        key, None, type_=type_, unique=False, isoutparam=True)
def not_(clause):
    """Return a negation of the given clause, i.e. ``NOT(clause)``.
    The ``~`` operator is also overloaded on all
    :class:`.ColumnElement` subclasses to produce the
    same result.
    """
    # Coerce plain Python values to a bound expression first, then apply
    # the inversion operator (which dispatches to __invert__).
    return operators.inv(_literal_as_binds(clause))
@inspection._self_inspects
class ClauseElement(Visitable):
    """Base class for elements of a programmatically constructed sql
    expression.
    """
    __visit_name__ = 'clause'
    # Class-level defaults; instances get their own values only when needed.
    # _annotations is shared -- _annotate() returns Annotated copies rather
    # than mutating this dict.
    _annotations = {}
    supports_execution = False
    _from_objects = []
    bind = None
    _is_clone_of = None
    is_selectable = False
    is_clause_element = True
    description = None
    _order_by_label_element = None
    _is_from_container = False
    def _clone(self):
        """Create a shallow copy of this ClauseElement.
        This method may be used by a generative API. Its also used as
        part of the "deep" copy afforded by a traversal that combines
        the _copy_internals() method.
        """
        c = self.__class__.__new__(self.__class__)
        c.__dict__ = self.__dict__.copy()
        # Clear memoized values that must be recomputed for the copy.
        ClauseElement._cloned_set._reset(c)
        ColumnElement.comparator._reset(c)
        # this is a marker that helps to "equate" clauses to each other
        # when a Select returns its list of FROM clauses. the cloning
        # process leaves around a lot of remnants of the previous clause
        # typically in the form of column expressions still attached to the
        # old table.
        c._is_clone_of = self
        return c
    @property
    def _constructor(self):
        """return the 'constructor' for this ClauseElement.
        This is for the purposes for creating a new object of
        this type. Usually, its just the element's __class__.
        However, the "Annotated" version of the object overrides
        to return the class of its proxied element.
        """
        return self.__class__
    @util.memoized_property
    def _cloned_set(self):
        """Return the set consisting all cloned ancestors of this
        ClauseElement.
        Includes this ClauseElement. This accessor tends to be used for
        FromClause objects to identify 'equivalent' FROM clauses, regardless
        of transformative operations.
        """
        s = util.column_set()
        f = self
        # Walk the _is_clone_of chain back to the original element.
        while f is not None:
            s.add(f)
            f = f._is_clone_of
        return s
    def __getstate__(self):
        # Exclude the clone-ancestry marker from pickling; it is a
        # transient in-memory linkage only.
        d = self.__dict__.copy()
        d.pop('_is_clone_of', None)
        return d
    def _annotate(self, values):
        """return a copy of this ClauseElement with annotations
        updated by the given dictionary.
        """
        return Annotated(self, values)
    def _with_annotations(self, values):
        """return a copy of this ClauseElement with annotations
        replaced by the given dictionary.
        """
        return Annotated(self, values)
    def _deannotate(self, values=None, clone=False):
        """return a copy of this :class:`.ClauseElement` with annotations
        removed.
        :param values: optional tuple of individual values
         to remove.  NOTE: the base implementation ignores *values*, since
         a plain ClauseElement carries no annotations.
        """
        if clone:
            # clone is used when we are also copying
            # the expression for a deep deannotation
            return self._clone()
        else:
            # if no clone, since we have no annotations we return
            # self
            return self
    def _execute_on_connection(self, connection, multiparams, params):
        # Dispatch hook used by Connection.execute().
        return connection._execute_clauseelement(self, multiparams, params)
    def unique_params(self, *optionaldict, **kwargs):
        """Return a copy with :func:`bindparam()` elements replaced.
        Same functionality as ``params()``, except adds `unique=True`
        to affected bind parameters so that multiple statements can be
        used.
        """
        return self._params(True, optionaldict, kwargs)
    def params(self, *optionaldict, **kwargs):
        """Return a copy with :func:`bindparam()` elements replaced.
        Returns a copy of this ClauseElement with :func:`bindparam()`
        elements replaced with values taken from the given dictionary::
          >>> clause = column('x') + bindparam('foo')
          >>> print clause.compile().params
          {'foo':None}
          >>> print clause.params({'foo':7}).compile().params
          {'foo':7}
        """
        return self._params(False, optionaldict, kwargs)
    def _params(self, unique, optionaldict, kwargs):
        """Shared implementation of params()/unique_params()."""
        if len(optionaldict) == 1:
            kwargs.update(optionaldict[0])
        elif len(optionaldict) > 1:
            raise exc.ArgumentError(
                "params() takes zero or one positional dictionary argument")
        # Visitor applied to every bindparam in a cloned copy of the tree.
        def visit_bindparam(bind):
            if bind.key in kwargs:
                bind.value = kwargs[bind.key]
                bind.required = False
            if unique:
                bind._convert_to_unique()
        return cloned_traverse(self, {}, {'bindparam': visit_bindparam})
    def compare(self, other, **kw):
        """Compare this ClauseElement to the given ClauseElement.
        Subclasses should override the default behavior, which is a
        straight identity comparison.
        \**kw are arguments consumed by subclass compare() methods and
        may be used to modify the criteria for comparison.
        (see :class:`.ColumnElement`)
        """
        return self is other
    def _copy_internals(self, clone=_clone, **kw):
        """Reassign internal elements to be clones of themselves.
        Called during a copy-and-traverse operation on newly
        shallow-copied elements to create a deep copy.
        The given clone function should be used, which may be applying
        additional transformations to the element (i.e. replacement
        traversal, cloned traversal, annotations).
        """
        pass
    def get_children(self, **kwargs):
        """Return immediate child elements of this :class:`.ClauseElement`.
        This is used for visit traversal.
        \**kwargs may contain flags that change the collection that is
        returned, for example to return a subset of items in order to
        cut down on larger traversals, or to return child items from a
        different context (such as schema-level collections instead of
        clause-level).
        """
        return []
    def self_group(self, against=None):
        """Apply a 'grouping' to this :class:`.ClauseElement`.
        This method is overridden by subclasses to return a
        "grouping" construct, i.e. parenthesis. In particular
        it's used by "binary" expressions to provide a grouping
        around themselves when placed into a larger expression,
        as well as by :func:`.select` constructs when placed into
        the FROM clause of another :func:`.select`. (Note that
        subqueries should be normally created using the
        :meth:`.Select.alias` method, as many platforms require
        nested SELECT statements to be named).
        As expressions are composed together, the application of
        :meth:`self_group` is automatic - end-user code should never
        need to use this method directly. Note that SQLAlchemy's
        clause constructs take operator precedence into account -
        so parenthesis might not be needed, for example, in
        an expression like ``x OR (y AND z)`` - AND takes precedence
        over OR.
        The base :meth:`self_group` method of :class:`.ClauseElement`
        just returns self.
        """
        return self
    @util.dependencies("sqlalchemy.engine.default")
    def compile(self, default, bind=None, dialect=None, **kw):
        """Compile this sql expression.
        The return value is a :class:`~.Compiled` object.
        Calling ``str()`` or ``unicode()`` on the returned value will yield a
        string representation of the result. The
        :class:`~.Compiled` object also can return a
        dictionary of bind parameter names and values
        using the ``params`` accessor.
        :param bind: An ``Engine`` or ``Connection`` from which a
         ``Compiled`` will be acquired. This argument takes precedence over
         this :class:`.ClauseElement`'s bound engine, if any.
        :param column_keys: Used for INSERT and UPDATE statements, a list of
         column names which should be present in the VALUES clause of the
         compiled statement. If ``None``, all columns from the target table
         object are rendered.
        :param dialect: A ``Dialect`` instance from which a ``Compiled``
         will be acquired. This argument takes precedence over the `bind`
         argument as well as this :class:`.ClauseElement`'s bound engine,
         if any.
        :param inline: Used for INSERT statements, for a dialect which does
         not support inline retrieval of newly generated primary key
         columns, will force the expression used to create the new primary
         key value to be rendered inline within the INSERT statement's
         VALUES clause. This typically refers to Sequence execution but may
         also refer to any server-side default generation function
         associated with a primary key `Column`.
        :param compile_kwargs: optional dictionary of additional parameters
         that will be passed through to the compiler within all "visit"
         methods. This allows any custom flag to be passed through to
         a custom compilation construct, for example. It is also used
         for the case of passing the ``literal_binds`` flag through::
            from sqlalchemy.sql import table, column, select
            t = table('t', column('x'))
            s = select([t]).where(t.c.x == 5)
            print s.compile(compile_kwargs={"literal_binds": True})
        .. versionadded:: 0.9.0
        .. seealso::
            :ref:`faq_sql_expression_string`
        """
        # Precedence: explicit dialect arg > bind arg > self.bind > default.
        if not dialect:
            if bind:
                dialect = bind.dialect
            elif self.bind:
                dialect = self.bind.dialect
                bind = self.bind
            else:
                dialect = default.DefaultDialect()
        return self._compiler(dialect, bind=bind, **kw)
    def _compiler(self, dialect, **kw):
        """Return a compiler appropriate for this ClauseElement, given a
        Dialect."""
        return dialect.statement_compiler(dialect, self, **kw)
    def __str__(self):
        # Python 3 returns str directly; Python 2 must encode the unicode
        # compilation result to a byte string.
        if util.py3k:
            return str(self.compile())
        else:
            return unicode(self.compile()).encode('ascii', 'backslashreplace')
    def __and__(self, other):
        """'and' at the ClauseElement level.
        .. deprecated:: 0.9.5 - conjunctions are intended to be
           at the :class:`.ColumnElement`. level
        """
        return and_(self, other)
    def __or__(self, other):
        """'or' at the ClauseElement level.
        .. deprecated:: 0.9.5 - conjunctions are intended to be
           at the :class:`.ColumnElement`. level
        """
        return or_(self, other)
    def __invert__(self):
        # Prefer a precomputed negation (e.g. IS NOT for IS) when present.
        if hasattr(self, 'negation_clause'):
            return self.negation_clause
        else:
            return self._negate()
    def _negate(self):
        return UnaryExpression(
            self.self_group(against=operators.inv),
            operator=operators.inv,
            negate=None)
    def __bool__(self):
        # SQL expressions have no Python truth value; ``x == y`` builds a
        # clause rather than comparing, so bool() must be an error.
        raise TypeError("Boolean value of this clause is not defined")
    # Python 2 spelling of __bool__.
    __nonzero__ = __bool__
    def __repr__(self):
        friendly = self.description
        if friendly is None:
            return object.__repr__(self)
        else:
            return '<%s.%s at 0x%x; %s>' % (
                self.__module__, self.__class__.__name__, id(self), friendly)
class ColumnElement(operators.ColumnOperators, ClauseElement):
    """Represent a column-oriented sql expression suitable for usage in the
    "columns" clause, WHERE clause etc. of a statement.
    While the most familiar kind of :class:`.ColumnElement` is the
    :class:`.Column` object, :class:`.ColumnElement` serves as the basis
    for any unit that may be present in a sql expression, including
    the expressions themselves, sql functions, bound parameters,
    literal expressions, keywords such as ``NULL``, etc.
    :class:`.ColumnElement` is the ultimate base class for all such elements.
    A wide variety of SQLAlchemy Core functions work at the sql expression
    level, and are intended to accept instances of :class:`.ColumnElement` as
    arguments. These functions will typically document that they accept a
    "sql expression" as an argument. What this means in terms of SQLAlchemy
    usually refers to an input which is either already in the form of a
    :class:`.ColumnElement` object, or a value which can be **coerced** into
    one. The coercion rules followed by most, but not all, SQLAlchemy Core
    functions with regards to sql expressions are as follows:
    * a literal Python value, such as a string, integer or floating
      point value, boolean, datetime, ``Decimal`` object, or virtually
      any other Python object, will be coerced into a "literal bound
      value". This generally means that a :func:`.bindparam` will be
      produced featuring the given value embedded into the construct; the
      resulting :class:`.BindParameter` object is an instance of
      :class:`.ColumnElement`. The Python value will ultimately be sent
      to the DBAPI at execution time as a paramterized argument to the
      ``execute()`` or ``executemany()`` methods, after SQLAlchemy
      type-specific converters (e.g. those provided by any associated
      :class:`.TypeEngine` objects) are applied to the value.
    * any special object value, typically ORM-level constructs, which
      feature a method called ``__clause_element__()``. The Core
      expression system looks for this method when an object of otherwise
      unknown type is passed to a function that is looking to coerce the
      argument into a :class:`.ColumnElement` expression. The
      ``__clause_element__()`` method, if present, should return a
      :class:`.ColumnElement` instance. The primary use of
      ``__clause_element__()`` within SQLAlchemy is that of class-bound
      attributes on ORM-mapped classes; a ``User`` class which contains a
      mapped attribute named ``.name`` will have a method
      ``User.name.__clause_element__()`` which when invoked returns the
      :class:`.Column` called ``name`` associated with the mapped table.
    * The Python ``None`` value is typically interpreted as ``NULL``,
      which in SQLAlchemy Core produces an instance of :func:`.null`.
    A :class:`.ColumnElement` provides the ability to generate new
    :class:`.ColumnElement`
    objects using Python expressions. This means that Python operators
    such as ``==``, ``!=`` and ``<`` are overloaded to mimic sql operations,
    and allow the instantiation of further :class:`.ColumnElement` instances
    which are composed from other, more fundamental :class:`.ColumnElement`
    objects. For example, two :class:`.ColumnClause` objects can be added
    together with the addition operator ``+`` to produce
    a :class:`.BinaryExpression`.
    Both :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses
    of :class:`.ColumnElement`::
        >>> from sqlalchemy.sql import column
        >>> column('a') + column('b')
        <sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
        >>> print column('a') + column('b')
        a + b
    .. seealso::
        :class:`.Column`
        :func:`.expression.column`
    """
    __visit_name__ = 'column'
    # Class-level defaults overridden by concrete subclasses (e.g. Column).
    primary_key = False
    foreign_keys = []
    _label = None
    _key_label = key = None
    _alt_names = ()
    def self_group(self, against=None):
        # Boolean-typed expressions placed inside AND/OR get wrapped in
        # AsBoolean so they render as proper predicates.
        if (against in (operators.and_, operators.or_, operators._asbool) and
                self.type._type_affinity
                is type_api.BOOLEANTYPE._type_affinity):
            return AsBoolean(self, operators.istrue, operators.isfalse)
        else:
            return self
    def _negate(self):
        # Boolean expressions negate to their IS FALSE form rather than
        # a generic NOT wrapper.
        if self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity:
            return AsBoolean(self, operators.isfalse, operators.istrue)
        else:
            return super(ColumnElement, self)._negate()
    @util.memoized_property
    def type(self):
        # Default SQL type when a subclass does not assign one.
        return type_api.NULLTYPE
    @util.memoized_property
    def comparator(self):
        # Per-type comparison/operator implementation; memoized per instance.
        return self.type.comparator_factory(self)
    def __getattr__(self, key):
        # Unknown attributes are delegated to the type-specific comparator
        # (this is how type-specific operators become available).
        try:
            return getattr(self.comparator, key)
        except AttributeError:
            raise AttributeError(
                'Neither %r object nor %r object has an attribute %r' % (
                    type(self).__name__,
                    type(self.comparator).__name__,
                    key)
            )
    def operate(self, op, *other, **kwargs):
        return op(self.comparator, *other, **kwargs)
    def reverse_operate(self, op, other, **kwargs):
        return op(other, self.comparator, **kwargs)
    def _bind_param(self, operator, obj):
        # Bind a literal compared against this column, inheriting this
        # column's type for value processing.
        return BindParameter(None, obj,
                             _compared_to_operator=operator,
                             _compared_to_type=self.type, unique=True)
    @property
    def expression(self):
        """Return a column expression.
        Part of the inspection interface; returns self.
        """
        return self
    @property
    def _select_iterable(self):
        return (self, )
    @util.memoized_property
    def base_columns(self):
        # Proxy-set members with no _proxies are the "root" columns.
        return util.column_set(c for c in self.proxy_set
                               if not hasattr(c, '_proxies'))
    @util.memoized_property
    def proxy_set(self):
        # Transitive closure of this column plus everything it proxies for.
        s = util.column_set([self])
        if hasattr(self, '_proxies'):
            for c in self._proxies:
                s.update(c.proxy_set)
        return s
    def shares_lineage(self, othercolumn):
        """Return True if the given :class:`.ColumnElement`
        has a common ancestor to this :class:`.ColumnElement`."""
        return bool(self.proxy_set.intersection(othercolumn.proxy_set))
    def _compare_name_for_result(self, other):
        """Return True if the given column element compares to this one
        when targeting within a result row."""
        return hasattr(other, 'name') and hasattr(self, 'name') and \
            other.name == self.name
    def _make_proxy(
            self, selectable, name=None, name_is_truncatable=False, **kw):
        """Create a new :class:`.ColumnElement` representing this
        :class:`.ColumnElement` as it appears in the select list of a
        descending selectable.
        """
        # With no explicit name, fall back to the anonymous label; the
        # collection key prefers self.key, then the string form.
        if name is None:
            name = self.anon_label
            if self.key:
                key = self.key
            else:
                try:
                    key = str(self)
                except exc.UnsupportedCompilationError:
                    key = self.anon_label
        else:
            key = name
        co = ColumnClause(
            _as_truncated(name) if name_is_truncatable else name,
            type_=getattr(self, 'type', None),
            _selectable=selectable
        )
        co._proxies = [self]
        # Carry clone lineage through to the proxy so equivalence checks
        # keep working on cloned selectables.
        if selectable._is_clone_of is not None:
            co._is_clone_of = \
                selectable._is_clone_of.columns.get(key)
        selectable._columns[key] = co
        return co
    def compare(self, other, use_proxies=False, equivalents=None, **kw):
        """Compare this ColumnElement to another.
        Special arguments understood:
        :param use_proxies: when True, consider two columns that
          share a common base column as equivalent (i.e. shares_lineage())
        :param equivalents: a dictionary of columns as keys mapped to sets
          of columns. If the given "other" column is present in this
          dictionary, if any of the columns in the corresponding set() pass
          the comparison test, the result is True. This is used to expand the
          comparison to other columns that may be known to be equivalent to
          this one via foreign key or other criterion.
        """
        to_compare = (other, )
        if equivalents and other in equivalents:
            to_compare = equivalents[other].union(to_compare)
        # NOTE(review): the 'else: return False' below is attached to the
        # inner if/elif, so the loop exits False on the FIRST non-matching
        # candidate instead of trying the rest of to_compare (and returns
        # None if to_compare were empty).  Upstream SQLAlchemy uses a
        # for/else here -- looks like a transcription defect; confirm
        # against the released 0.9 source before relying on equivalents.
        for oth in to_compare:
            if use_proxies and self.shares_lineage(oth):
                return True
            elif hash(oth) == hash(self):
                return True
            else:
                return False
    def label(self, name):
        """Produce a column label, i.e. ``<columnname> AS <name>``.
        This is a shortcut to the :func:`~.expression.label` function.
        if 'name' is None, an anonymous label name will be generated.
        """
        return Label(name, self, self.type)
    @util.memoized_property
    def anon_label(self):
        """provides a constant 'anonymous label' for this ColumnElement.
        This is a label() expression which will be named at compile time.
        The same label() is returned each time anon_label is called so
        that expressions can reference anon_label multiple times, producing
        the same label name at compile time.
        the compiler uses this function automatically at compile time
        for expressions that are known to be 'unnamed' like binary
        expressions and function calls.
        """
        # '%%(...)s' keeps a literal %(...)s placeholder that the compiler
        # substitutes with the generated anon name.
        return _anonymous_label(
            '%%(%d %s)s' % (id(self), getattr(self, 'name', 'anon'))
        )
class BindParameter(ColumnElement):
    """Represent a "bound expression".

    :class:`.BindParameter` is invoked explicitly using the
    :func:`.bindparam` function, as in::

        from sqlalchemy import bindparam

        stmt = select([users_table]).\\
                    where(users_table.c.name == bindparam('username'))

    Detailed discussion of how :class:`.BindParameter` is used is
    at :func:`.bindparam`.

    .. seealso::

        :func:`.bindparam`

    """
    __visit_name__ = 'bindparam'

    # a BindParameter is not itself an INSERT/UPDATE "crud" column
    _is_crud = False

    def __init__(self, key, value=NO_ARG, type_=None,
                 unique=False, required=NO_ARG,
                 quote=None, callable_=None,
                 isoutparam=False,
                 _compared_to_operator=None,
                 _compared_to_type=None):
        """Produce a "bound expression".

        The return value is an instance of :class:`.BindParameter`; this
        is a :class:`.ColumnElement` subclass which represents a so-called
        "placeholder" value in a sql expression, the value of which is
        supplied at the point at which the statement is executed against a
        database connection.

        In SQLAlchemy, the :func:`.bindparam` construct has
        the ability to carry along the actual value that will be ultimately
        used at expression time.  In this way, it serves not just as
        a "placeholder" for eventual population, but also as a means of
        representing so-called "unsafe" values which should not be rendered
        directly in a sql statement, but rather should be passed along
        to the :term:`DBAPI` as values which need to be correctly escaped
        and potentially handled for type-safety.

        When using :func:`.bindparam` explicitly, the use case is typically
        one of traditional deferment of parameters; the :func:`.bindparam`
        construct accepts a name which can then be referred to at execution
        time::

            from sqlalchemy import bindparam

            stmt = select([users_table]).\\
                        where(users_table.c.name == bindparam('username'))

        The above statement, when rendered, will produce sql similar to::

            SELECT id, name FROM user WHERE name = :username

        In order to populate the value of ``:username`` above, the value
        would typically be applied at execution time to a method
        like :meth:`.Connection.execute`::

            result = connection.execute(stmt, username='wendy')

        Explicit use of :func:`.bindparam` is also common when producing
        UPDATE or DELETE statements that are to be invoked multiple times,
        where the WHERE criterion of the statement is to change on each
        invocation, such as::

            stmt = (users_table.update().
                    where(user_table.c.name == bindparam('username')).
                    values(fullname=bindparam('fullname'))
                    )

            connection.execute(
                stmt, [{"username": "wendy", "fullname": "Wendy Smith"},
                       {"username": "jack", "fullname": "Jack Jones"},
                       ]
            )

        SQLAlchemy's Core expression system makes wide use of
        :func:`.bindparam` in an implicit sense.   It is typical that Python
        literal values passed to virtually all sql expression functions are
        coerced into fixed :func:`.bindparam` constructs.  For example, given
        a comparison operation such as::

            expr = users_table.c.name == 'Wendy'

        The above expression will produce a :class:`.BinaryExpression`
        construct, where the left side is the :class:`.Column` object
        representing the ``name`` column, and the right side is a
        :class:`.BindParameter` representing the literal value::

            print(repr(expr.right))
            BindParameter('%(4327771088 name)s', 'Wendy', type_=String())

        The expression above will render sql such as::

            user.name = :name_1

        Where the ``:name_1`` parameter name is an anonymous name.  The
        actual string ``Wendy`` is not in the rendered string, but is carried
        along where it is later used within statement execution.  If we
        invoke a statement like the following::

            stmt = select([users_table]).where(users_table.c.name == 'Wendy')
            result = connection.execute(stmt)

        We would see sql logging output as::

            SELECT "user".id, "user".name
            FROM "user"
            WHERE "user".name = %(name_1)s
            {'name_1': 'Wendy'}

        Above, we see that ``Wendy`` is passed as a parameter to the database,
        while the placeholder ``:name_1`` is rendered in the appropriate form
        for the target database, in this case the Postgresql database.

        Similarly, :func:`.bindparam` is invoked automatically
        when working with :term:`CRUD` statements as far as the "VALUES"
        portion is concerned.   The :func:`.insert` construct produces an
        ``INSERT`` expression which will, at statement execution time,
        generate bound placeholders based on the arguments passed, as in::

            stmt = users_table.insert()
            result = connection.execute(stmt, name='Wendy')

        The above will produce sql output as::

            INSERT INTO "user" (name) VALUES (%(name)s)
            {'name': 'Wendy'}

        The :class:`.Insert` construct, at compilation/execution time,
        rendered a single :func:`.bindparam` mirroring the column
        name ``name`` as a result of the single ``name`` parameter
        we passed to the :meth:`.Connection.execute` method.

        :param key:
          the key (e.g. the name) for this bind param.
          Will be used in the generated
          sql statement for dialects that use named parameters.  This
          value may be modified when part of a compilation operation,
          if other :class:`BindParameter` objects exist with the same
          key, or if its length is too long and truncation is
          required.

        :param value:
          Initial value for this bind param.  Will be used at statement
          execution time as the value for this parameter passed to the
          DBAPI, if no other value is indicated to the statement execution
          method for this particular parameter name.  Defaults to ``None``.

        :param callable\_:
          A callable function that takes the place of "value".  The function
          will be called at statement execution time to determine the
          ultimate value.   Used for scenarios where the actual bind
          value cannot be determined at the point at which the clause
          construct is created, but embedded bind values are still desirable.

        :param type\_:
          A :class:`.TypeEngine` class or instance representing an optional
          datatype for this :func:`.bindparam`.  If not passed, a type
          may be determined automatically for the bind, based on the given
          value; for example, trivial Python types such as ``str``,
          ``int``, ``bool``
          may result in the :class:`.String`, :class:`.Integer` or
          :class:`.Boolean` types being automatically selected.

          The type of a :func:`.bindparam` is significant especially in that
          the type will apply pre-processing to the value before it is
          passed to the database.  For example, a :func:`.bindparam` which
          refers to a datetime value, and is specified as holding the
          :class:`.DateTime` type, may apply conversion needed to the
          value (such as stringification on SQLite) before passing the value
          to the database.

        :param unique:
          if True, the key name of this :class:`.BindParameter` will be
          modified if another :class:`.BindParameter` of the same name
          already has been located within the containing
          expression.  This flag is used generally by the internals
          when producing so-called "anonymous" bound expressions, it
          isn't generally applicable to explicitly-named :func:`.bindparam`
          constructs.

        :param required:
          If ``True``, a value is required at execution time.  If not passed,
          it defaults to ``True`` if neither :paramref:`.bindparam.value`
          or :paramref:`.bindparam.callable` were passed.  If either of these
          parameters are present, then :paramref:`.bindparam.required`
          defaults to ``False``.

          .. versionchanged:: 0.8 If the ``required`` flag is not specified,
             it will be set automatically to ``True`` or ``False`` depending
             on whether or not the ``value`` or ``callable`` parameters
             were specified.

        :param quote:
          True if this parameter name requires quoting and is not
          currently known as a SQLAlchemy reserved word; this currently
          only applies to the Oracle backend, where bound names must
          sometimes be quoted.

        :param isoutparam:
          if True, the parameter should be treated like a stored procedure
          "OUT" parameter.  This applies to backends such as Oracle which
          support OUT parameters.

        .. seealso::

            :ref:`coretutorial_bind_param`

            :ref:`coretutorial_insert_expressions`

            :func:`.outparam`

        """
        if isinstance(key, ColumnClause):
            # a Column-like object was passed for "key"; adopt its type
            # and use its string name as the parameter key
            type_ = key.type
            key = key.name
        if required is NO_ARG:
            # a value is required at execution time only when neither
            # a literal value nor a callable was supplied here
            required = (value is NO_ARG and callable_ is None)
        if value is NO_ARG:
            value = None
        if quote is not None:
            key = quoted_name(key, quote)
        if unique:
            # anonymize the key so it can be uniquified during compilation
            self.key = _anonymous_label('%%(%d %s)s' % (id(self), key
                                                        or 'param'))
        else:
            self.key = key or _anonymous_label('%%(%d param)s'
                                               % id(self))
        # identifying key that won't change across
        # clones, used to identify the bind's logical
        # identity
        self._identifying_key = self.key
        # key that was passed in the first place, used to
        # generate new keys
        self._orig_key = key or 'param'
        self.unique = unique
        self.value = value
        self.callable = callable_
        self.isoutparam = isoutparam
        self.required = required
        if type_ is None:
            if _compared_to_type is not None:
                # derive the type from the expression this bind is being
                # compared against
                self.type = \
                    _compared_to_type.coerce_compared_value(
                        _compared_to_operator, value)
            else:
                # fall back to a type keyed off the Python type of the value
                self.type = type_api._type_map.get(type(value),
                                                   type_api.NULLTYPE)
        elif isinstance(type_, type):
            # a TypeEngine class (not instance) was given; instantiate it
            self.type = type_()
        else:
            self.type = type_

    def _with_value(self, value):
        """Return a copy of this :class:`.BindParameter` with the given value
        set.
        """
        cloned = self._clone()
        cloned.value = value
        cloned.callable = None
        cloned.required = False
        if cloned.type is type_api.NULLTYPE:
            # re-derive the type now that an actual value is known
            cloned.type = type_api._type_map.get(type(value),
                                                 type_api.NULLTYPE)
        return cloned

    @property
    def effective_value(self):
        """Return the value of this bound parameter,
        taking into account if the ``callable`` parameter
        was set.

        The ``callable`` value will be evaluated
        and returned if present, else ``value``.

        """
        if self.callable:
            return self.callable()
        else:
            return self.value

    def _clone(self):
        c = ClauseElement._clone(self)
        if self.unique:
            # regenerate an anonymous key tied to the new object's id()
            c.key = _anonymous_label('%%(%d %s)s' % (id(c), c._orig_key
                                                     or 'param'))
        return c

    def _convert_to_unique(self):
        # switch an explicitly-keyed bind over to anonymous/unique naming
        if not self.unique:
            self.unique = True
            self.key = _anonymous_label(
                '%%(%d %s)s' % (id(self), self._orig_key or 'param'))

    def compare(self, other, **kw):
        """Compare this :class:`BindParameter` to the given
        clause."""

        return isinstance(other, BindParameter) \
            and self.type._compare_type_affinity(other.type) \
            and self.value == other.value

    def __getstate__(self):
        """execute a deferred value for serialization purposes."""

        d = self.__dict__.copy()
        v = self.value
        if self.callable:
            # resolve the deferred value now; the callable itself is
            # likely not picklable
            v = self.callable()
            d['callable'] = None
        d['value'] = v
        return d

    def __repr__(self):
        return 'BindParameter(%r, %r, type_=%r)' % (self.key,
                                                    self.value, self.type)
class TypeClause(ClauseElement):
    """A bare type keyword rendered within a sql statement.

    Employed by the ``Case`` construct to emit a type name inline.
    """

    __visit_name__ = 'typeclause'

    def __init__(self, type):
        # the TypeEngine object whose name will be rendered
        self.type = type
class TextClause(Executable, ClauseElement):
    """Represent a literal sql text fragment.

    E.g.::

        from sqlalchemy import text

        t = text("SELECT * FROM users")
        result = connection.execute(t)

    The :class:`.Text` construct is produced using the :func:`.text`
    function; see that function for full documentation.

    .. seealso::

        :func:`.text`

    """
    __visit_name__ = 'textclause'

    # matches ":name" bind-parameter syntax, skipping "::" (casts) and
    # backslash-escaped colons
    _bind_params_regex = re.compile(r'(?<![:\w\x5c]):(\w+)(?!:)', re.UNICODE)
    _execution_options = \
        Executable._execution_options.union(
            {'autocommit': PARSE_AUTOCOMMIT})

    @property
    def _select_iterable(self):
        return (self,)

    @property
    def selectable(self):
        # a TextClause stands in as its own "selectable"
        return self

    _hide_froms = []

    def __init__(
            self,
            text,
            bind=None):
        self._bind = bind
        self._bindparams = {}

        def repl(m):
            # register a BindParameter for each ":name" token found
            self._bindparams[m.group(1)] = BindParameter(m.group(1))
            return ':%s' % m.group(1)

        # scan the string and search for bind parameter names, add them
        # to the list of bindparams
        self.text = self._bind_params_regex.sub(repl, text)

    @classmethod
    def _create_text(cls, text, bind=None, bindparams=None,
                     typemap=None, autocommit=None):
        """Construct a new :class:`.TextClause` clause, representing
        a textual sql string directly.

        E.g.::

            from sqlalchemy import text

            t = text("SELECT * FROM users")
            result = connection.execute(t)

        The advantages :func:`.text` provides over a plain string are
        backend-neutral support for bind parameters, per-statement
        execution options, as well as
        bind parameter and result-column typing behavior, allowing
        SQLAlchemy type constructs to play a role when executing
        a statement that is specified literally.  The construct can also
        be provided with a ``.c`` collection of column elements, allowing
        it to be embedded in other sql expression constructs as a subquery.

        Bind parameters are specified by name, using the format ``:name``.
        E.g.::

            t = text("SELECT * FROM users WHERE id=:user_id")
            result = connection.execute(t, user_id=12)

        For sql statements where a colon is required verbatim, as within
        an inline string, use a backslash to escape::

            t = text("SELECT * FROM users WHERE name='\\:username'")

        The :class:`.TextClause` construct includes methods which can
        provide information about the bound parameters as well as the column
        values which would be returned from the textual statement, assuming
        it's an executable SELECT type of statement.  The
        :meth:`.TextClause.bindparams` method is used to provide bound
        parameter detail, and :meth:`.TextClause.columns` method allows
        specification of return columns including names and types::

            t = text("SELECT * FROM users WHERE id=:user_id").\\
                    bindparams(user_id=7).\\
                    columns(id=Integer, name=String)

            for id, name in connection.execute(t):
                print(id, name)

        The :func:`.text` construct is used internally in cases when
        a literal string is specified for part of a larger query, such as
        when a string is specified to the :meth:`.Select.where` method of
        :class:`.Select`.  In those cases, the same
        bind parameter syntax is applied::

            s = select([users.c.id, users.c.name]).where("id=:user_id")
            result = connection.execute(s, user_id=12)

        Using :func:`.text` explicitly usually implies the construction
        of a full, standalone statement.   As such, SQLAlchemy refers
        to it as an :class:`.Executable` object, and it supports
        the :meth:`Executable.execution_options` method.  For example,
        a :func:`.text` construct that should be subject to "autocommit"
        can be set explicitly so using the
        :paramref:`.Connection.execution_options.autocommit` option::

            t = text("EXEC my_procedural_thing()").\\
                    execution_options(autocommit=True)

        Note that SQLAlchemy's usual "autocommit" behavior applies to
        :func:`.text` constructs implicitly - that is, statements which begin
        with a phrase such as ``INSERT``, ``UPDATE``, ``DELETE``,
        or a variety of other phrases specific to certain backends, will
        be eligible for autocommit if no transaction is in progress.

        :param text:
          the text of the sql statement to be created.  use ``:<param>``
          to specify bind parameters; they will be compiled to their
          engine-specific format.

        :param autocommit:
          Deprecated.  Use .execution_options(autocommit=<True|False>)
          to set the autocommit option.

        :param bind:
          an optional connection or engine to be used for this text query.

        :param bindparams:
          Deprecated.  A list of :func:`.bindparam` instances used to
          provide information about parameters embedded in the statement.
          This argument now invokes the :meth:`.TextClause.bindparams`
          method on the construct before returning it.  E.g.::

              stmt = text("SELECT * FROM table WHERE id=:id",
                          bindparams=[bindparam('id', value=5, type_=Integer)])

          Is equivalent to::

              stmt = text("SELECT * FROM table WHERE id=:id").\\
                        bindparams(bindparam('id', value=5, type_=Integer))

          .. deprecated:: 0.9.0 the :meth:`.TextClause.bindparams` method
             supersedes the ``bindparams`` argument to :func:`.text`.

        :param typemap:
          Deprecated.  A dictionary mapping the names of columns
          represented in the columns clause of a ``SELECT`` statement
          to type objects,
          which will be used to perform post-processing on columns within
          the result set.  This parameter now invokes the
          :meth:`.TextClause.columns` method, which returns a
          :class:`.TextAsFrom` construct that gains a ``.c`` collection and
          can be embedded in other expressions.  E.g.::

              stmt = text("SELECT * FROM table",
                          typemap={'id': Integer, 'name': String},
                          )

          Is equivalent to::

              stmt = text("SELECT * FROM table").columns(id=Integer,
                                                         name=String)

          Or alternatively::

              from sqlalchemy.sql import column
              stmt = text("SELECT * FROM table").columns(
                          column('id', Integer),
                          column('name', String)
                      )

          .. deprecated:: 0.9.0 the :meth:`.TextClause.columns` method
             supersedes the ``typemap`` argument to :func:`.text`.

        """
        stmt = TextClause(text, bind=bind)
        # the deprecated "bindparams"/"typemap" arguments are translated
        # into the generative methods that supersede them
        if bindparams:
            stmt = stmt.bindparams(*bindparams)
        if typemap:
            stmt = stmt.columns(**typemap)
        if autocommit is not None:
            util.warn_deprecated('autocommit on text() is deprecated. '
                                 'Use .execution_options(autocommit=True)')
            stmt = stmt.execution_options(autocommit=autocommit)
        return stmt

    @_generative
    def bindparams(self, *binds, **names_to_values):
        """Establish the values and/or types of bound parameters within
        this :class:`.TextClause` construct.

        Given a text construct such as::

            from sqlalchemy import text
            stmt = text("SELECT id, name FROM user WHERE name=:name "
                        "AND timestamp=:timestamp")

        the :meth:`.TextClause.bindparams` method can be used to establish
        the initial value of ``:name`` and ``:timestamp``,
        using simple keyword arguments::

            stmt = stmt.bindparams(name='jack',
                                   timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))

        Where above, new :class:`.BindParameter` objects
        will be generated with the names ``name`` and ``timestamp``, and
        values of ``jack`` and ``datetime.datetime(2012, 10, 8, 15, 12, 5)``,
        respectively.  The types will be
        inferred from the values given, in this case :class:`.String` and
        :class:`.DateTime`.

        When specific typing behavior is needed, the positional ``*binds``
        argument can be used in which to specify :func:`.bindparam` constructs
        directly.  These constructs must include at least the ``key``
        argument, then an optional value and type::

            from sqlalchemy import bindparam
            stmt = stmt.bindparams(
                            bindparam('name', value='jack', type_=String),
                            bindparam('timestamp', type_=DateTime)
                        )

        Above, we specified the type of :class:`.DateTime` for the
        ``timestamp`` bind, and the type of :class:`.String` for the ``name``
        bind.  In the case of ``name`` we also set the default value of
        ``"jack"``.

        Additional bound parameters can be supplied at statement execution
        time, e.g.::

            result = connection.execute(stmt,
                        timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))

        The :meth:`.TextClause.bindparams` method can be called repeatedly,
        where it will re-use existing :class:`.BindParameter` objects to add
        new information.  For example, we can call
        :meth:`.TextClause.bindparams` first with typing information, and a
        second time with value information, and it will be combined::

            stmt = text("SELECT id, name FROM user WHERE name=:name "
                        "AND timestamp=:timestamp")
            stmt = stmt.bindparams(
                bindparam('name', type_=String),
                bindparam('timestamp', type_=DateTime)
            )
            stmt = stmt.bindparams(
                name='jack',
                timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)
            )

        .. versionadded:: 0.9.0 The :meth:`.TextClause.bindparams` method
           supersedes the argument ``bindparams`` passed to
           :func:`~.expression.text`.

        """
        self._bindparams = new_params = self._bindparams.copy()

        # positional binds replace existing parameters of the same key;
        # an unknown key is an error since the parameter names are fixed
        # by the text itself
        for bind in binds:
            try:
                existing = new_params[bind.key]
            except KeyError:
                raise exc.ArgumentError(
                    "This text() construct doesn't define a "
                    "bound parameter named %r" % bind.key)
            else:
                new_params[existing.key] = bind

        # keyword arguments assign values onto existing parameters
        for key, value in names_to_values.items():
            try:
                existing = new_params[key]
            except KeyError:
                raise exc.ArgumentError(
                    "This text() construct doesn't define a "
                    "bound parameter named %r" % key)
            else:
                new_params[key] = existing._with_value(value)

    @util.dependencies('sqlalchemy.sql.selectable')
    def columns(self, selectable, *cols, **types):
        """Turn this :class:`.TextClause` object into a :class:`.TextAsFrom`
        object that can be embedded into another statement.

        This function essentially bridges the gap between an entirely
        textual SELECT statement and the sql expression language concept
        of a "selectable"::

            from sqlalchemy.sql import column, text

            stmt = text("SELECT id, name FROM some_table")
            stmt = stmt.columns(column('id'), column('name')).alias('st')

            stmt = select([mytable]).\\
                    select_from(
                        mytable.join(stmt, mytable.c.name == stmt.c.name)
                    ).where(stmt.c.id > 5)

        Above, we used untyped :func:`.column` elements.  These can also have
        types specified, which will impact how the column behaves in
        expressions as well as determining result set behavior::

            stmt = text("SELECT id, name, timestamp FROM some_table")
            stmt = stmt.columns(
                        column('id', Integer),
                        column('name', Unicode),
                        column('timestamp', DateTime)
                    )

            for id, name, timestamp in connection.execute(stmt):
                print(id, name, timestamp)

        Keyword arguments allow just the names and types of columns to be
        specified, where the :func:`.column` elements will be generated
        automatically::

            stmt = text("SELECT id, name, timestamp FROM some_table")
            stmt = stmt.columns(
                        id=Integer,
                        name=Unicode,
                        timestamp=DateTime
                    )

            for id, name, timestamp in connection.execute(stmt):
                print(id, name, timestamp)

        The :meth:`.TextClause.columns` method provides a direct
        route to calling :meth:`.FromClause.alias` as well as
        :meth:`.SelectBase.cte` against a textual SELECT statement::

            stmt = stmt.columns(id=Integer, name=String).cte('st')

            stmt = select([sometable]).where(sometable.c.id == stmt.c.id)

        .. versionadded:: 0.9.0 :func:`.text` can now be converted into a
           fully featured "selectable" construct using the
           :meth:`.TextClause.columns` method.  This method supersedes the
           ``typemap`` argument to :func:`.text`.

        """
        # positional columns may have their type overridden by a same-named
        # keyword; remaining keywords generate new ColumnClause objects
        input_cols = [
            ColumnClause(col.key, types.pop(col.key))
            if col.key in types
            else col
            for col in cols
        ] + [ColumnClause(key, type_) for key, type_ in types.items()]
        return selectable.TextAsFrom(self, input_cols)

    @property
    def type(self):
        return type_api.NULLTYPE

    @property
    def comparator(self):
        return self.type.comparator_factory(self)

    def self_group(self, against=None):
        # only parenthesize when rendered as the target of an IN
        if against is operators.in_op:
            return Grouping(self)
        else:
            return self

    def _copy_internals(self, clone=_clone, **kw):
        self._bindparams = dict((b.key, clone(b, **kw))
                                for b in self._bindparams.values())

    def get_children(self, **kwargs):
        return list(self._bindparams.values())

    def compare(self, other):
        return isinstance(other, TextClause) and other.text == self.text
class Null(ColumnElement):
    """The SQL ``NULL`` keyword.

    Obtained as a constant via the :func:`.null` function.
    """

    __visit_name__ = 'null'

    @util.memoized_property
    def type(self):
        # NULL carries the "null type"
        return type_api.NULLTYPE

    @classmethod
    def _singleton(cls):
        """Return the constant :class:`.Null` construct."""

        return NULL

    def compare(self, other):
        # all Null constructs are interchangeable
        return isinstance(other, Null)
class False_(ColumnElement):
    """The SQL ``false`` keyword, or its backend equivalent.

    Obtained as a constant via the :func:`.false` function.
    """

    __visit_name__ = 'false'

    @util.memoized_property
    def type(self):
        return type_api.BOOLEANTYPE

    def _negate(self):
        # NOT false == true
        return TRUE

    @classmethod
    def _singleton(cls):
        """Return the constant :class:`.False_` construct.

        E.g.::

            >>> from sqlalchemy import false
            >>> print select([t.c.x]).where(false())
            SELECT x FROM t WHERE false

        On a backend without true/false constants, an expression against
        1 or 0 is rendered instead::

            >>> print select([t.c.x]).where(false())
            SELECT x FROM t WHERE 0 = 1

        Within an :func:`.and_` or :func:`.or_` conjunction, the
        :func:`.true` and :func:`.false` constants "short circuit"::

            >>> print select([t.c.x]).where(or_(t.c.x > 5, true()))
            SELECT x FROM t WHERE true

            >>> print select([t.c.x]).where(and_(t.c.x > 5, false()))
            SELECT x FROM t WHERE false

        .. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature
           better integrated behavior within conjunctions and on dialects
           that don't support true/false constants.

        .. seealso::

            :func:`.true`

        """

        return FALSE

    def compare(self, other):
        # all False_ constructs are interchangeable
        return isinstance(other, False_)
class True_(ColumnElement):
    """The SQL ``true`` keyword, or its backend equivalent.

    Obtained as a constant via the :func:`.true` function.
    """

    __visit_name__ = 'true'

    @util.memoized_property
    def type(self):
        return type_api.BOOLEANTYPE

    def _negate(self):
        # NOT true == false
        return FALSE

    @classmethod
    def _ifnone(cls, other):
        # substitute the TRUE constant for a missing (None) expression
        return cls._singleton() if other is None else other

    @classmethod
    def _singleton(cls):
        """Return the constant :class:`.True_` construct.

        E.g.::

            >>> from sqlalchemy import true
            >>> print select([t.c.x]).where(true())
            SELECT x FROM t WHERE true

        On a backend without true/false constants, an expression against
        1 or 0 is rendered instead::

            >>> print select([t.c.x]).where(true())
            SELECT x FROM t WHERE 1 = 1

        Within an :func:`.and_` or :func:`.or_` conjunction, the
        :func:`.true` and :func:`.false` constants "short circuit"::

            >>> print select([t.c.x]).where(or_(t.c.x > 5, true()))
            SELECT x FROM t WHERE true

            >>> print select([t.c.x]).where(and_(t.c.x > 5, false()))
            SELECT x FROM t WHERE false

        .. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature
           better integrated behavior within conjunctions and on dialects
           that don't support true/false constants.

        .. seealso::

            :func:`.false`

        """

        return TRUE

    def compare(self, other):
        # all True_ constructs are interchangeable
        return isinstance(other, True_)
# canonical singleton instances, returned by the null(), false() and
# true() public functions via each class's _singleton() hook
NULL = Null()
FALSE = False_()
TRUE = True_()
class ClauseList(ClauseElement):
    """A sequence of clauses joined by a single operator.

    Renders comma-separated by default, as in a column listing.
    """

    __visit_name__ = 'clauselist'

    def __init__(self, *clauses, **kwargs):
        self.operator = kwargs.pop('operator', operators.comma_op)
        self.group = kwargs.pop('group', True)
        self.group_contents = kwargs.pop('group_contents', True)
        coerced = [_literal_as_text(c) for c in clauses]
        if self.group_contents:
            # parenthesize each member relative to our operator as needed
            self.clauses = [
                c.self_group(against=self.operator) for c in coerced]
        else:
            self.clauses = coerced

    def __iter__(self):
        return iter(self.clauses)

    def __len__(self):
        return len(self.clauses)

    @property
    def _select_iterable(self):
        return iter(self)

    def append(self, clause):
        # coerce and, if configured, group the new member just as
        # the constructor does
        member = _literal_as_text(clause)
        if self.group_contents:
            member = member.self_group(against=self.operator)
        self.clauses.append(member)

    def _copy_internals(self, clone=_clone, **kw):
        self.clauses = [clone(c, **kw) for c in self.clauses]

    def get_children(self, **kwargs):
        return self.clauses

    @property
    def _from_objects(self):
        froms = []
        for c in self.clauses:
            froms.extend(c._from_objects)
        return froms

    def self_group(self, against=None):
        if self.group and operators.is_precedent(self.operator, against):
            return Grouping(self)
        return self

    def compare(self, other, **kw):
        """Compare this :class:`.ClauseList` to the given :class:`.ClauseList`,
        including a comparison of all the clause items.

        """
        if not isinstance(other, ClauseList) and len(self.clauses) == 1:
            # a single-element list compares directly against a bare clause
            return self.clauses[0].compare(other, **kw)
        if isinstance(other, ClauseList) and \
                len(self.clauses) == len(other.clauses):
            for mine, theirs in zip(self.clauses, other.clauses):
                if not mine.compare(theirs, **kw):
                    return False
            return self.operator == other.operator
        return False
class BooleanClauseList(ClauseList, ColumnElement):
    # a ClauseList joined by AND or OR; constructed only via the
    # and_() / or_() classmethods below.
    __visit_name__ = 'clauselist'

    def __init__(self, *arg, **kw):
        raise NotImplementedError(
            "BooleanClauseList has a private constructor")

    @classmethod
    def _construct(cls, operator, continue_on, skip_on, *clauses, **kw):
        # continue_on: clause type that is a no-op for this operator
        #   (True_ within AND, False_ within OR) and is dropped.
        # skip_on: clause type that short-circuits the whole expression
        #   (False_ within AND, True_ within OR); the conjunction
        #   collapses to that clause alone.
        convert_clauses = []
        clauses = util.coerce_generator_arg(clauses)
        for clause in clauses:
            clause = _literal_as_text(clause)
            if isinstance(clause, continue_on):
                continue
            elif isinstance(clause, skip_on):
                return clause.self_group(against=operators._asbool)
            convert_clauses.append(clause)
        if len(convert_clauses) == 1:
            # a single remaining clause needs no conjunction at all
            return convert_clauses[0].self_group(against=operators._asbool)
        elif not convert_clauses and clauses:
            # every clause was a no-op constant; return one of them
            return clauses[0].self_group(against=operators._asbool)
        convert_clauses = [c.self_group(against=operator)
                           for c in convert_clauses]
        # build the instance via __new__, since __init__ is disabled
        self = cls.__new__(cls)
        self.clauses = convert_clauses
        self.group = True
        self.operator = operator
        self.group_contents = True
        self.type = type_api.BOOLEANTYPE
        return self

    @classmethod
    def and_(cls, *clauses):
        """Produce a conjunction of expressions joined by ``AND``.

        E.g.::

            from sqlalchemy import and_

            stmt = select([users_table]).where(
                            and_(
                                users_table.c.name == 'wendy',
                                users_table.c.enrolled == True
                            )
                        )

        The :func:`.and_` conjunction is also available using the
        Python ``&`` operator (though note that compound expressions
        need to be parenthesized in order to function with Python
        operator precedence behavior)::

            stmt = select([users_table]).where(
                            (users_table.c.name == 'wendy') &
                            (users_table.c.enrolled == True)
                        )

        The :func:`.and_` operation is also implicit in some cases;
        the :meth:`.Select.where` method for example can be invoked multiple
        times against a statement, which will have the effect of each
        clause being combined using :func:`.and_`::

            stmt = select([users_table]).\\
                        where(users_table.c.name == 'wendy').\\
                        where(users_table.c.enrolled == True)

        .. seealso::

            :func:`.or_`

        """
        return cls._construct(operators.and_, True_, False_, *clauses)

    @classmethod
    def or_(cls, *clauses):
        """Produce a conjunction of expressions joined by ``OR``.

        E.g.::

            from sqlalchemy import or_

            stmt = select([users_table]).where(
                            or_(
                                users_table.c.name == 'wendy',
                                users_table.c.name == 'jack'
                            )
                        )

        The :func:`.or_` conjunction is also available using the
        Python ``|`` operator (though note that compound expressions
        need to be parenthesized in order to function with Python
        operator precedence behavior)::

            stmt = select([users_table]).where(
                            (users_table.c.name == 'wendy') |
                            (users_table.c.name == 'jack')
                        )

        .. seealso::

            :func:`.and_`

        """
        return cls._construct(operators.or_, False_, True_, *clauses)

    @property
    def _select_iterable(self):
        return (self, )

    def self_group(self, against=None):
        if not self.clauses:
            # an empty conjunction never needs parenthesization
            return self
        else:
            return super(BooleanClauseList, self).self_group(against=against)

    def _negate(self):
        return ClauseList._negate(self)
# public module-level aliases for the BooleanClauseList factories
and_ = BooleanClauseList.and_
or_ = BooleanClauseList.or_
class Tuple(ClauseList, ColumnElement):
    """Represent a sql tuple."""

    def __init__(self, *clauses, **kw):
        """Return a :class:`.Tuple`.

        Main usage is to produce a composite IN construct::

            from sqlalchemy import tuple_

            tuple_(table.c.col1, table.c.col2).in_(
                [(1, 2), (5, 12), (10, 19)]
            )

        .. warning::

            The composite IN construct is not supported by all backends,
            and is currently known to work on Postgresql and MySQL,
            but not SQLite.   Unsupported backends will raise
            a subclass of :class:`~sqlalchemy.exc.DBAPIError` when such
            an expression is invoked.

        """
        coerced = [_literal_as_binds(c) for c in clauses]
        self._type_tuple = [c.type for c in coerced]
        # the tuple's own type follows the first element unless overridden
        if self._type_tuple:
            default_type = self._type_tuple[0]
        else:
            default_type = type_api.NULLTYPE
        self.type = kw.pop('type_', default_type)
        super(Tuple, self).__init__(*coerced, **kw)

    @property
    def _select_iterable(self):
        return (self, )

    def _bind_param(self, operator, obj):
        # pair each incoming value with the type of the corresponding
        # tuple element when producing anonymous binds
        binds = [
            BindParameter(None, value, _compared_to_operator=operator,
                          _compared_to_type=typ, unique=True)
            for value, typ in zip(obj, self._type_tuple)
        ]
        return Tuple(*binds).self_group()
class Case(ColumnElement):
"""Represent a ``CASE`` expression.
:class:`.Case` is produced using the :func:`.case` factory function,
as in::
from sqlalchemy import case
stmt = select([users_table]).\\
where(
case(
[
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
],
else_='E'
)
)
Details on :class:`.Case` usage is at :func:`.case`.
.. seealso::
:func:`.case`
"""
__visit_name__ = 'case'
def __init__(self, whens, value=None, else_=None):
"""Produce a ``CASE`` expression.
The ``CASE`` construct in sql is a conditional object that
acts somewhat analogously to an "if/then" construct in other
languages. It returns an instance of :class:`.Case`.
:func:`.case` in its usual form is passed a list of "when"
constructs, that is, a list of conditions and results as tuples::
from sqlalchemy import case
stmt = select([users_table]).\\
where(
case(
[
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
],
else_='E'
)
)
The above statement will produce sql resembling::
SELECT id, name FROM user
WHERE CASE
WHEN (name = :name_1) THEN :param_1
WHEN (name = :name_2) THEN :param_2
ELSE :param_3
END
When simple equality expressions of several values against a single
parent column are needed, :func:`.case` also has a "shorthand" format
used via the
:paramref:`.case.value` parameter, which is passed a column
expression to be compared. In this form, the :paramref:`.case.whens`
parameter is passed as a dictionary containing expressions to be
compared against keyed to result expressions. The statement below is
equivalent to the preceding statement::
stmt = select([users_table]).\\
where(
case(
{"wendy": "W", "jack": "J"},
value=users_table.c.name,
else_='E'
)
)
The values which are accepted as result values in
:paramref:`.case.whens` as well as with :paramref:`.case.else_` are
coerced from Python literals into :func:`.bindparam` constructs.
sql expressions, e.g. :class:`.ColumnElement` constructs, are accepted
as well. To coerce a literal string expression into a constant
expression rendered inline, use the :func:`.literal_column` construct,
as in::
from sqlalchemy import case, literal_column
case(
[
(
orderline.c.qty > 100,
literal_column("'greaterthan100'")
),
(
orderline.c.qty > 10,
literal_column("'greaterthan10'")
)
],
else_=literal_column("'lessthan10'")
)
The above will render the given constants without using bound
parameters for the result values (but still for the comparison
values), as in::
CASE
WHEN (orderline.qty > :qty_1) THEN 'greaterthan100'
WHEN (orderline.qty > :qty_2) THEN 'greaterthan10'
ELSE 'lessthan10'
END
:param whens: The criteria to be compared against,
:paramref:`.case.whens` accepts two different forms, based on
whether or not :paramref:`.case.value` is used.
In the first form, it accepts a list of 2-tuples; each 2-tuple
consists of ``(<sql expression>, <value>)``, where the sql
expression is a boolean expression and "value" is a resulting value,
e.g.::
case([
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
])
In the second form, it accepts a Python dictionary of comparison
values mapped to a resulting value; this form requires
:paramref:`.case.value` to be present, and values will be compared
using the ``==`` operator, e.g.::
case(
{"wendy": "W", "jack": "J"},
value=users_table.c.name
)
:param value: An optional sql expression which will be used as a
fixed "comparison point" for candidate values within a dictionary
passed to :paramref:`.case.whens`.
:param else\_: An optional sql expression which will be the evaluated
result of the ``CASE`` construct if all expressions within
:paramref:`.case.whens` evaluate to false. When omitted, most
databases will produce a result of NULL if none of the "when"
expressions evaluate to true.
"""
try:
whens = util.dictlike_iteritems(whens)
except TypeError:
pass
if value is not None:
whenlist = [
(_literal_as_binds(c).self_group(),
_literal_as_binds(r)) for (c, r) in whens
]
else:
whenlist = [
(_no_literals(c).self_group(),
_literal_as_binds(r)) for (c, r) in whens
]
if whenlist:
type_ = list(whenlist[-1])[-1].type
else:
type_ = None
if value is None:
self.value = None
else:
self.value = _literal_as_binds(value)
self.type = type_
self.whens = whenlist
if else_ is not None:
self.else_ = _literal_as_binds(else_)
else:
self.else_ = None
    def _copy_internals(self, clone=_clone, **kw):
        """Replace the internal clause elements of this ``CASE`` with clones.

        Invoked during statement copying/adaption; clones the optional
        value expression, both sides of every WHEN/THEN pair, and the
        optional ELSE clause so the copy shares no child elements.
        """
        if self.value is not None:
            self.value = clone(self.value, **kw)
        # clone both the condition and the result of each WHEN/THEN pair
        self.whens = [(clone(x, **kw), clone(y, **kw))
                      for x, y in self.whens]
        if self.else_ is not None:
            self.else_ = clone(self.else_, **kw)
    def get_children(self, **kwargs):
        """Yield the immediate child elements of this ``CASE``: the
        optional comparison value, each WHEN/THEN pair, and the optional
        ELSE expression.
        """
        if self.value is not None:
            yield self.value
        for x, y in self.whens:
            yield x
            yield y
        if self.else_ is not None:
            yield self.else_
    @property
    def _from_objects(self):
        # Flatten the FROM objects contributed by every child expression
        # into a single list.
        return list(itertools.chain(*[x._from_objects for x in
                                      self.get_children()]))
def literal_column(text, type_=None):
    """Produce a :class:`.ColumnClause` object that has the
    :paramref:`.column.is_literal` flag set to True.

    :func:`.literal_column` is similar to :func:`.column`, except that
    it is more often used as a "standalone" column expression that renders
    exactly as stated; while :func:`.column` stores a string name that
    will be assumed to be part of a table and may be quoted as such,
    :func:`.literal_column` can be that, or any other arbitrary
    column-oriented expression.

    :param text: the text of the expression; can be any SQL expression.
     Quoting rules will not be applied.  To specify a column-name
     expression which should be subject to quoting rules, use the
     :func:`column` function.

    :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
     object which will provide result-set translation and additional
     expression semantics for this column.  If left as None the type
     will be NullType.

    .. seealso::

        :func:`.column`

        :func:`.text`

        :ref:`sqlexpression_literal_column`

    """
    # is_literal=True disables identifier quoting; the text renders verbatim
    return ColumnClause(text, type_=type_, is_literal=True)
class Cast(ColumnElement):
    """Represent a ``CAST`` expression.

    :class:`.Cast` is produced using the :func:`.cast` factory function,
    as in::

        from sqlalchemy import cast, Numeric

        stmt = select([
                    cast(product_table.c.unit_price, Numeric(10, 4))
                ])

    Details on :class:`.Cast` usage is at :func:`.cast`.

    .. seealso::

        :func:`.cast`

    """

    __visit_name__ = 'cast'

    def __init__(self, expression, type_):
        """Produce a ``CAST`` expression.

        :func:`.cast` returns an instance of :class:`.Cast`.

        E.g.::

            from sqlalchemy import cast, Numeric

            stmt = select([
                        cast(product_table.c.unit_price, Numeric(10, 4))
                    ])

        The above statement will produce SQL resembling::

            SELECT CAST(unit_price AS NUMERIC(10, 4)) FROM product

        The :func:`.cast` function performs two distinct functions when
        used.  The first is that it renders the ``CAST`` expression within
        the resulting SQL string.  The second is that it associates the given
        type (e.g. :class:`.TypeEngine` class or instance) with the column
        expression on the Python side, which means the expression will take
        on the expression operator behavior associated with that type,
        as well as the bound-value handling and result-row-handling behavior
        of the type.

        .. versionchanged:: 0.9.0 :func:`.cast` now applies the given type
           to the expression such that it takes effect on the bound-value,
           e.g. the Python-to-database direction, in addition to the
           result handling, e.g. database-to-Python, direction.

        An alternative to :func:`.cast` is the :func:`.type_coerce` function.
        This function performs the second task of associating an expression
        with a specific type, but does not render the ``CAST`` expression
        in SQL.

        :param expression: A SQL expression, such as a :class:`.ColumnElement`
         expression or a Python string which will be coerced into a bound
         literal value.

        :param type_: A :class:`.TypeEngine` class or instance indicating
         the type to which the ``CAST`` should apply.

        .. seealso::

            :func:`.type_coerce` - Python-side type coercion without emitting
            CAST.

        """
        # coerce the target to a TypeEngine instance first; the same type
        # drives bound-parameter coercion of ``expression`` below.
        self.type = type_api.to_instance(type_)
        self.clause = _literal_as_binds(expression, type_=self.type)
        # the rendered "AS <type>" portion of the CAST expression
        self.typeclause = TypeClause(self.type)

    def _copy_internals(self, clone=_clone, **kw):
        # replace child clauses with clones during statement copying
        self.clause = clone(self.clause, **kw)
        self.typeclause = clone(self.typeclause, **kw)

    def get_children(self, **kwargs):
        return self.clause, self.typeclause

    @property
    def _from_objects(self):
        # FROM objects are derived solely from the casted expression
        return self.clause._from_objects
class Extract(ColumnElement):
    """Represent a SQL EXTRACT clause, ``extract(field FROM expr)``."""

    __visit_name__ = 'extract'

    def __init__(self, field, expr, **kwargs):
        """Return a :class:`.Extract` construct.

        This is typically available as :func:`.extract`
        as well as ``func.extract`` from the
        :data:`.func` namespace.

        :param field: the field to extract, e.g. ``'year'``.
        :param expr: the expression to extract from; coerced to a bound
         parameter if a plain Python value is passed.
        """
        # EXTRACT always yields an integer result type
        self.type = type_api.INTEGERTYPE
        self.field = field
        self.expr = _literal_as_binds(expr, None)

    def _copy_internals(self, clone=_clone, **kw):
        self.expr = clone(self.expr, **kw)

    def get_children(self, **kwargs):
        return self.expr,

    @property
    def _from_objects(self):
        return self.expr._from_objects
class UnaryExpression(ColumnElement):
    """Define a 'unary' expression.

    A unary expression has a single column expression
    and an operator.  The operator can be placed on the left
    (where it is called the 'operator') or right (where it is called the
    'modifier') of the column expression.

    :class:`.UnaryExpression` is the basis for several unary operators
    including those used by :func:`.desc`, :func:`.asc`, :func:`.distinct`,
    :func:`.nullsfirst` and :func:`.nullslast`.

    """

    __visit_name__ = 'unary'

    def __init__(self, element, operator=None, modifier=None,
                 type_=None, negate=None):
        self.operator = operator
        self.modifier = modifier
        # parenthesize the element against whichever side of the
        # operator/modifier is in use
        self.element = element.self_group(
            against=self.operator or self.modifier)
        self.type = type_api.to_instance(type_)
        # optional operator used to render the negation of this expression
        self.negate = negate

    @classmethod
    def _create_nullsfirst(cls, column):
        """Produce the ``NULLS FIRST`` modifier for an ``ORDER BY``
        expression.

        :func:`.nullsfirst` is intended to modify the expression produced
        by :func:`.asc` or :func:`.desc`, and indicates how NULL values
        should be handled when they are encountered during ordering::

            from sqlalchemy import desc, nullsfirst

            stmt = select([users_table]).\\
                        order_by(nullsfirst(desc(users_table.c.name)))

        The SQL expression from the above would resemble::

            SELECT id, name FROM user ORDER BY name DESC NULLS FIRST

        Like :func:`.asc` and :func:`.desc`, :func:`.nullsfirst` is typically
        invoked from the column expression itself using
        :meth:`.ColumnElement.nullsfirst`, rather than as its standalone
        function version, as in::

            stmt = (select([users_table]).
                    order_by(users_table.c.name.desc().nullsfirst())
                    )

        .. seealso::

            :func:`.asc`

            :func:`.desc`

            :func:`.nullslast`

            :meth:`.Select.order_by`

        """
        return UnaryExpression(
            _literal_as_text(column), modifier=operators.nullsfirst_op)

    @classmethod
    def _create_nullslast(cls, column):
        """Produce the ``NULLS LAST`` modifier for an ``ORDER BY``
        expression.

        :func:`.nullslast` is intended to modify the expression produced
        by :func:`.asc` or :func:`.desc`, and indicates how NULL values
        should be handled when they are encountered during ordering::

            from sqlalchemy import desc, nullslast

            stmt = select([users_table]).\\
                        order_by(nullslast(desc(users_table.c.name)))

        The SQL expression from the above would resemble::

            SELECT id, name FROM user ORDER BY name DESC NULLS LAST

        Like :func:`.asc` and :func:`.desc`, :func:`.nullslast` is typically
        invoked from the column expression itself using
        :meth:`.ColumnElement.nullslast`, rather than as its standalone
        function version, as in::

            stmt = select([users_table]).\\
                        order_by(users_table.c.name.desc().nullslast())

        .. seealso::

            :func:`.asc`

            :func:`.desc`

            :func:`.nullsfirst`

            :meth:`.Select.order_by`

        """
        return UnaryExpression(
            _literal_as_text(column), modifier=operators.nullslast_op)

    @classmethod
    def _create_desc(cls, column):
        """Produce a descending ``ORDER BY`` clause element.

        e.g.::

            from sqlalchemy import desc

            stmt = select([users_table]).order_by(desc(users_table.c.name))

        will produce SQL as::

            SELECT id, name FROM user ORDER BY name DESC

        The :func:`.desc` function is a standalone version of the
        :meth:`.ColumnElement.desc` method available on all SQL expressions,
        e.g.::

            stmt = select([users_table]).order_by(users_table.c.name.desc())

        :param column: A :class:`.ColumnElement` (e.g. scalar SQL expression)
         with which to apply the :func:`.desc` operation.

        .. seealso::

            :func:`.asc`

            :func:`.nullsfirst`

            :func:`.nullslast`

            :meth:`.Select.order_by`

        """
        return UnaryExpression(
            _literal_as_text(column), modifier=operators.desc_op)

    @classmethod
    def _create_asc(cls, column):
        """Produce an ascending ``ORDER BY`` clause element.

        e.g.::

            from sqlalchemy import asc

            stmt = select([users_table]).order_by(asc(users_table.c.name))

        will produce SQL as::

            SELECT id, name FROM user ORDER BY name ASC

        The :func:`.asc` function is a standalone version of the
        :meth:`.ColumnElement.asc` method available on all SQL expressions,
        e.g.::

            stmt = select([users_table]).order_by(users_table.c.name.asc())

        :param column: A :class:`.ColumnElement` (e.g. scalar SQL expression)
         with which to apply the :func:`.asc` operation.

        .. seealso::

            :func:`.desc`

            :func:`.nullsfirst`

            :func:`.nullslast`

            :meth:`.Select.order_by`

        """
        return UnaryExpression(
            _literal_as_text(column), modifier=operators.asc_op)

    @classmethod
    def _create_distinct(cls, expr):
        """Produce an column-expression-level unary ``DISTINCT`` clause.

        This applies the ``DISTINCT`` keyword to an individual column
        expression, and is typically contained within an aggregate function,
        as in::

            from sqlalchemy import distinct, func
            stmt = select([func.count(distinct(users_table.c.name))])

        The above would produce an expression resembling::

            SELECT COUNT(DISTINCT name) FROM user

        The :func:`.distinct` function is also available as a column-level
        method, e.g. :meth:`.ColumnElement.distinct`, as in::

            stmt = select([func.count(users_table.c.name.distinct())])

        The :func:`.distinct` operator is different from the
        :meth:`.Select.distinct` method of :class:`.Select`,
        which produces a ``SELECT`` statement
        with ``DISTINCT`` applied to the result set as a whole,
        e.g. a ``SELECT DISTINCT`` expression.  See that method for further
        information.

        .. seealso::

            :meth:`.ColumnElement.distinct`

            :meth:`.Select.distinct`

            :data:`.func`

        """
        expr = _literal_as_binds(expr)
        # DISTINCT preserves the type of the wrapped expression
        return UnaryExpression(
            expr, operator=operators.distinct_op, type_=expr.type)

    @util.memoized_property
    def _order_by_label_element(self):
        # for ASC/DESC, an ORDER BY may refer to the label of the wrapped
        # element; other unary operators don't participate in label matching
        if self.modifier in (operators.desc_op, operators.asc_op):
            return self.element._order_by_label_element
        else:
            return None

    @property
    def _from_objects(self):
        return self.element._from_objects

    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)

    def get_children(self, **kwargs):
        return self.element,

    def compare(self, other, **kw):
        """Compare this :class:`UnaryExpression` against the given
        :class:`.ClauseElement`."""

        return (
            isinstance(other, UnaryExpression) and
            self.operator == other.operator and
            self.modifier == other.modifier and
            self.element.compare(other.element, **kw)
        )

    def _negate(self):
        if self.negate is not None:
            # swap operator/negate to produce the inverse expression
            return UnaryExpression(
                self.element,
                operator=self.negate,
                negate=self.operator,
                modifier=self.modifier,
                type_=self.type)
        else:
            return ClauseElement._negate(self)

    def self_group(self, against=None):
        # parenthesize only when our operator binds less tightly than the
        # operator we're being placed against
        if self.operator and operators.is_precedent(self.operator, against):
            return Grouping(self)
        else:
            return self
class AsBoolean(UnaryExpression):
    """A unary expression that wraps an element with a boolean-typed
    operator, e.g. for rendering an expression in a boolean context.

    Unlike the base :class:`.UnaryExpression`, the wrapped element is
    stored without grouping and the result type is always BOOLEANTYPE.
    """

    def __init__(self, element, operator, negate):
        # note: deliberately does not call UnaryExpression.__init__;
        # no self_group() is applied to the element here
        self.element = element
        self.type = type_api.BOOLEANTYPE
        self.operator = operator
        self.negate = negate
        self.modifier = None

    def self_group(self, against=None):
        # never requires additional parenthesization
        return self

    def _negate(self):
        # negation is delegated to the wrapped element itself
        return self.element._negate()
class BinaryExpression(ColumnElement):
    """Represent an expression that is ``LEFT <operator> RIGHT``.

    A :class:`.BinaryExpression` is generated automatically
    whenever two column expressions are used in a Python binary expression::

        >>> from sqlalchemy.sql import column
        >>> column('a') + column('b')
        <sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
        >>> print column('a') + column('b')
        a + b

    """

    __visit_name__ = 'binary'

    def __init__(self, left, right, operator, type_=None,
                 negate=None, modifiers=None):
        # allow compatibility with libraries that
        # refer to BinaryExpression directly and pass strings
        if isinstance(operator, util.string_types):
            operator = operators.custom_op(operator)
        # keep the un-grouped originals for identity comparison in __bool__
        self._orig = (left, right)
        self.left = left.self_group(against=operator)
        self.right = right.self_group(against=operator)
        self.operator = operator
        self.type = type_api.to_instance(type_)
        # optional operator used to render the negation of this expression
        self.negate = negate

        if modifiers is None:
            self.modifiers = {}
        else:
            self.modifiers = modifiers

    def __bool__(self):
        # support "in" / identity-style checks: for == and != compare the
        # hashes of the original (ungrouped) operands; any other operator
        # has no defined boolean value.
        if self.operator in (operator.eq, operator.ne):
            return self.operator(hash(self._orig[0]), hash(self._orig[1]))
        else:
            raise TypeError("Boolean value of this clause is not defined")

    # Python 2 spelling of __bool__
    __nonzero__ = __bool__

    @property
    def is_comparison(self):
        return operators.is_comparison(self.operator)

    @property
    def _from_objects(self):
        return self.left._from_objects + self.right._from_objects

    def _copy_internals(self, clone=_clone, **kw):
        self.left = clone(self.left, **kw)
        self.right = clone(self.right, **kw)

    def get_children(self, **kwargs):
        return self.left, self.right

    def compare(self, other, **kw):
        """Compare this :class:`BinaryExpression` against the
        given :class:`BinaryExpression`."""

        # for commutative operators, operands may also match when swapped
        return (
            isinstance(other, BinaryExpression) and
            self.operator == other.operator and
            (
                self.left.compare(other.left, **kw) and
                self.right.compare(other.right, **kw) or
                (
                    operators.is_commutative(self.operator) and
                    self.left.compare(other.right, **kw) and
                    self.right.compare(other.left, **kw)
                )
            )
        )

    def self_group(self, against=None):
        if operators.is_precedent(self.operator, against):
            return Grouping(self)
        else:
            return self

    def _negate(self):
        if self.negate is not None:
            # render with the negated operator; result is boolean typed
            return BinaryExpression(
                self.left,
                self.right,
                self.negate,
                negate=self.operator,
                type_=type_api.BOOLEANTYPE,
                modifiers=self.modifiers)
        else:
            return super(BinaryExpression, self)._negate()
class Grouping(ColumnElement):
    """Represent a grouping within a column expression, i.e. a
    parenthesized sub-expression."""

    __visit_name__ = 'grouping'

    def __init__(self, element):
        self.element = element
        # inherit the element's type where available
        self.type = getattr(element, 'type', type_api.NULLTYPE)

    def self_group(self, against=None):
        # already parenthesized; never needs further grouping
        return self

    @property
    def _label(self):
        return getattr(self.element, '_label', None) or self.anon_label

    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)

    def get_children(self, **kwargs):
        return self.element,

    @property
    def _from_objects(self):
        return self.element._from_objects

    def __getattr__(self, attr):
        # proxy all unknown attribute access to the wrapped element
        return getattr(self.element, attr)

    def __getstate__(self):
        # __getattr__ interferes with default pickling; pickle explicitly
        return {'element': self.element, 'type': self.type}

    def __setstate__(self, state):
        self.element = state['element']
        self.type = state['type']

    def compare(self, other, **kw):
        return isinstance(other, Grouping) and \
            self.element.compare(other.element)
class Over(ColumnElement):
    """Represent an OVER clause.

    This is a special operator against a so-called
    "window" function, as well as any aggregate function,
    which produces results relative to the result set
    itself.  It's supported only by certain database
    backends.

    """
    __visit_name__ = 'over'

    # class-level defaults; instances only set these when the
    # corresponding clause is supplied
    order_by = None
    partition_by = None

    def __init__(self, func, partition_by=None, order_by=None):
        """Produce an :class:`.Over` object against a function.

        Used against aggregate or so-called "window" functions,
        for database backends that support window functions.

        E.g.::

            from sqlalchemy import over
            over(func.row_number(), order_by='x')

        Would produce "ROW_NUMBER() OVER(ORDER BY x)".

        :param func: a :class:`.FunctionElement` construct, typically
         generated by :data:`~.expression.func`.
        :param partition_by: a column element or string, or a list
         of such, that will be used as the PARTITION BY clause
         of the OVER construct.
        :param order_by: a column element or string, or a list
         of such, that will be used as the ORDER BY clause
         of the OVER construct.

        This function is also available from the :data:`~.expression.func`
        construct itself via the :meth:`.FunctionElement.over` method.

        .. versionadded:: 0.7

        """
        self.func = func
        if order_by is not None:
            self.order_by = ClauseList(*util.to_list(order_by))
        if partition_by is not None:
            self.partition_by = ClauseList(*util.to_list(partition_by))

    @util.memoized_property
    def type(self):
        # the OVER expression takes on the type of the underlying function
        return self.func.type

    def get_children(self, **kwargs):
        return [c for c in
                (self.func, self.partition_by, self.order_by)
                if c is not None]

    def _copy_internals(self, clone=_clone, **kw):
        self.func = clone(self.func, **kw)
        if self.partition_by is not None:
            self.partition_by = clone(self.partition_by, **kw)
        if self.order_by is not None:
            self.order_by = clone(self.order_by, **kw)

    @property
    def _from_objects(self):
        # aggregate FROM objects across the function, PARTITION BY
        # and ORDER BY clauses
        return list(itertools.chain(
            *[c._from_objects for c in
              (self.func, self.partition_by, self.order_by)
              if c is not None]
        ))
class Label(ColumnElement):
    """Represents a column label (AS).

    Represent a label, as typically applied to any column-level
    element using the ``AS`` SQL keyword.

    """

    __visit_name__ = 'label'

    def __init__(self, name, element, type_=None):
        """Return a :class:`Label` object for the
        given :class:`.ColumnElement`.

        A label changes the name of an element in the columns clause of a
        ``SELECT`` statement, typically via the ``AS`` SQL keyword.

        This functionality is more conveniently available via the
        :meth:`.ColumnElement.label` method on :class:`.ColumnElement`.

        :param name: label name

        :param obj: a :class:`.ColumnElement`.

        """
        # unwrap nested Labels so we label the innermost element directly
        while isinstance(element, Label):
            element = element.element

        if name:
            self.name = name
        else:
            # no name given - generate an anonymous label keyed to this
            # object's id plus the element's name where available
            self.name = _anonymous_label(
                '%%(%d %s)s' % (id(self), getattr(element, 'name', 'anon'))
            )

        self.key = self._label = self._key_label = self.name
        self._element = element
        self._type = type_
        self._proxies = [element]

    def __reduce__(self):
        return self.__class__, (self.name, self._element, self._type)

    @util.memoized_property
    def _order_by_label_element(self):
        # a Label is its own ORDER BY label element
        return self

    @util.memoized_property
    def type(self):
        # explicit type_ wins; otherwise inherit the element's type
        return type_api.to_instance(
            self._type or getattr(self._element, 'type', None)
        )

    @util.memoized_property
    def element(self):
        return self._element.self_group(against=operators.as_)

    def self_group(self, against=None):
        sub_element = self._element.self_group(against=against)
        if sub_element is not self._element:
            # grouping produced a new element; re-label it
            return Label(self.name,
                         sub_element,
                         type_=self._type)
        else:
            return self

    @property
    def primary_key(self):
        return self.element.primary_key

    @property
    def foreign_keys(self):
        return self.element.foreign_keys

    def get_children(self, **kwargs):
        return self.element,

    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)

    @property
    def _from_objects(self):
        return self.element._from_objects

    def _make_proxy(self, selectable, name=None, **kw):
        # proxy the underlying element onto the selectable, then record
        # this Label as a proxy as well so column correspondence works
        e = self.element._make_proxy(selectable,
                                     name=name if name else self.name)
        e._proxies.append(self)
        if self._type is not None:
            e.type = self._type
        return e
class ColumnClause(Immutable, ColumnElement):
    """Represents a column expression from any textual string.

    The :class:`.ColumnClause`, a lightweight analogue to the
    :class:`.Column` class, is typically invoked using the
    :func:`.column` function, as in::

        from sqlalchemy.sql import column

        id, name = column("id"), column("name")
        stmt = select([id, name]).select_from("user")

    The above statement would produce SQL like::

        SELECT id, name FROM user

    :class:`.ColumnClause` is the immediate superclass of the schema-specific
    :class:`.Column` object.  While the :class:`.Column` class has all the
    same capabilities as :class:`.ColumnClause`, the :class:`.ColumnClause`
    class is usable by itself in those cases where behavioral requirements
    are limited to simple SQL expression generation.  The object has none of
    the associations with schema-level metadata or with execution-time
    behavior that :class:`.Column` does, so in that sense is a "lightweight"
    version of :class:`.Column`.

    Full details on :class:`.ColumnClause` usage is at :func:`.column`.

    .. seealso::

        :func:`.column`

        :class:`.Column`

    """
    __visit_name__ = 'column'

    onupdate = default = server_default = server_onupdate = None

    # memoized properties below are expired as a group when the owning
    # table is (re)assigned; see _set_table()
    _memoized_property = util.group_expirable_memoized_property()

    def __init__(self, text, type_=None, is_literal=False, _selectable=None):
        """Produce a :class:`.ColumnClause` object.

        The :class:`.ColumnClause` is a lightweight analogue to the
        :class:`.Column` class.  The :func:`.column` function can
        be invoked with just a name alone, as in::

            from sqlalchemy.sql import column

            id, name = column("id"), column("name")
            stmt = select([id, name]).select_from("user")

        The above statement would produce SQL like::

            SELECT id, name FROM user

        Once constructed, :func:`.column` may be used like any other SQL
        expression element such as within :func:`.select` constructs::

            from sqlalchemy.sql import column

            id, name = column("id"), column("name")
            stmt = select([id, name]).select_from("user")

        The text handled by :func:`.column` is assumed to be handled
        like the name of a database column; if the string contains mixed case,
        special characters, or matches a known reserved word on the target
        backend, the column expression will render using the quoting
        behavior determined by the backend.  To produce a textual SQL
        expression that is rendered exactly without any quoting,
        use :func:`.literal_column` instead, or pass ``True`` as the
        value of :paramref:`.column.is_literal`.  Additionally, full SQL
        statements are best handled using the :func:`.text` construct.

        :func:`.column` can be used in a table-like
        fashion by combining it with the :func:`.table` function
        (which is the lightweight analogue to :class:`.Table`) to produce
        a working table construct with minimal boilerplate::

            from sqlalchemy.sql import table, column

            user = table("user",
                    column("id"),
                    column("name"),
                    column("description"),
            )

            stmt = select([user.c.description]).where(user.c.name == 'wendy')

        A :func:`.column` / :func:`.table` construct like that illustrated
        above can be created in an
        ad-hoc fashion and is not associated with any
        :class:`.schema.MetaData`, DDL, or events, unlike its
        :class:`.Table` counterpart.

        :param text: the text of the element.

        :param type: :class:`.types.TypeEngine` object which can associate
          this :class:`.ColumnClause` with a type.

        :param is_literal: if True, the :class:`.ColumnClause` is assumed to
          be an exact expression that will be delivered to the output with no
          quoting rules applied regardless of case sensitive settings.  the
          :func:`.literal_column()` function essentially invokes
          :func:`.column` while passing ``is_literal=True``.

        .. seealso::

            :class:`.Column`

            :func:`.literal_column`

            :func:`.table`

            :func:`.text`

            :ref:`sqlexpression_literal_column`

        """

        self.key = self.name = text
        self.table = _selectable
        self.type = type_api.to_instance(type_)
        self.is_literal = is_literal

    def _compare_name_for_result(self, other):
        # when either side is literal / table-less / textual, fall back
        # to name or label comparison; otherwise compare proxy lineage
        if self.is_literal or \
                self.table is None or self.table._textual or \
                not hasattr(other, 'proxy_set') or (
                    isinstance(other, ColumnClause) and
                    (other.is_literal or
                     other.table is None or
                     other.table._textual)
                ):
            return (hasattr(other, 'name') and self.name == other.name) or \
                (hasattr(other, '_label') and self._label == other._label)
        else:
            return other.proxy_set.intersection(self.proxy_set)

    def _get_table(self):
        return self.__dict__['table']

    def _set_table(self, table):
        # changing the table invalidates memoized label/from computations
        self._memoized_property.expire_instance(self)
        self.__dict__['table'] = table
    table = property(_get_table, _set_table)

    @_memoized_property
    def _from_objects(self):
        t = self.table
        if t is not None:
            return [t]
        else:
            return []

    @util.memoized_property
    def description(self):
        if util.py3k:
            return self.name
        else:
            # py2k: descriptions are rendered as ascii bytes
            return self.name.encode('ascii', 'backslashreplace')

    @_memoized_property
    def _key_label(self):
        if self.key != self.name:
            return self._gen_label(self.key)
        else:
            return self._label

    @_memoized_property
    def _label(self):
        return self._gen_label(self.name)

    def _gen_label(self, name):
        """Generate a "<table>_<name>" style label for this column,
        applying quoting preferences and de-conflicting against existing
        columns of the owning table.  Returns None for literal columns,
        or the plain name when no named table is present."""
        t = self.table

        if self.is_literal:
            return None

        elif t is not None and t.named_with_column:
            if getattr(t, 'schema', None):
                label = t.schema.replace('.', '_') + "_" + \
                    t.name + "_" + name
            else:
                label = t.name + "_" + name

            # propagate name quoting rules for labels.
            if getattr(name, "quote", None) is not None:
                if isinstance(label, quoted_name):
                    label.quote = name.quote
                else:
                    label = quoted_name(label, name.quote)
            elif getattr(t.name, "quote", None) is not None:
                # can't get this situation to occur, so let's
                # assert false on it for now
                assert not isinstance(label, quoted_name)
                label = quoted_name(label, t.name.quote)

            # ensure the label name doesn't conflict with that
            # of an existing column
            if label in t.c:
                _label = label
                counter = 1
                while _label in t.c:
                    _label = label + "_" + str(counter)
                    counter += 1
                label = _label

            return _as_truncated(label)

        else:
            return name

    def _bind_param(self, operator, obj):
        return BindParameter(self.name, obj,
                             _compared_to_operator=operator,
                             _compared_to_type=self.type,
                             unique=True)

    def _make_proxy(self, selectable, name=None, attach=True,
                    name_is_truncatable=False, **kw):
        # propagate the "is_literal" flag only if we are keeping our name,
        # otherwise its considered to be a label
        is_literal = self.is_literal and (name is None or name == self.name)
        c = self._constructor(
            _as_truncated(name or self.name) if
            name_is_truncatable else
            (name or self.name),
            type_=self.type,
            _selectable=selectable,
            is_literal=is_literal
        )
        if name is None:
            c.key = self.key
        c._proxies = [self]
        if selectable._is_clone_of is not None:
            c._is_clone_of = \
                selectable._is_clone_of.columns.get(c.key)

        if attach:
            selectable._columns[c.key] = c
        return c
class _IdentifiedClause(Executable, ClauseElement):
    """Base for executable clauses identified by a single name, such as
    the savepoint statements below."""

    __visit_name__ = 'identified'

    # these statements are never subject to autocommit
    _execution_options = \
        Executable._execution_options.union({'autocommit': False})

    def __init__(self, ident):
        self.ident = ident
class SavepointClause(_IdentifiedClause):
    """Represent a SAVEPOINT statement."""

    __visit_name__ = 'savepoint'
class RollbackToSavepointClause(_IdentifiedClause):
    """Represent a ROLLBACK TO SAVEPOINT statement."""

    __visit_name__ = 'rollback_to_savepoint'
class ReleaseSavepointClause(_IdentifiedClause):
    """Represent a RELEASE SAVEPOINT statement."""

    __visit_name__ = 'release_savepoint'
class quoted_name(util.text_type):
    """Represent a SQL identifier combined with quoting preferences.

    :class:`.quoted_name` is a Python unicode/str subclass which
    represents a particular identifier name along with a
    ``quote`` flag.  This ``quote`` flag, when set to
    ``True`` or ``False``, overrides automatic quoting behavior
    for this identifier in order to either unconditionally quote
    or to not quote the name.  If left at its default of ``None``,
    quoting behavior is applied to the identifier on a per-backend basis
    based on an examination of the token itself.

    A :class:`.quoted_name` object with ``quote=True`` is also
    prevented from being modified in the case of a so-called
    "name normalize" option.  Certain database backends, such as
    Oracle, Firebird, and DB2 "normalize" case-insensitive names
    as uppercase.  The SQLAlchemy dialects for these backends
    convert from SQLAlchemy's lower-case-means-insensitive convention
    to the upper-case-means-insensitive conventions of those backends.
    The ``quote=True`` flag here will prevent this conversion from occurring
    to support an identifier that's quoted as all lower case against
    such a backend.

    The :class:`.quoted_name` object is normally created automatically
    when specifying the name for key schema constructs such as
    :class:`.Table`, :class:`.Column`, and others.  The class can also be
    passed explicitly as the name to any function that receives a name which
    can be quoted.  Such as to use the :meth:`.Engine.has_table` method with
    an unconditionally quoted name::

        from sqlalchemy import create_engine
        from sqlalchemy.sql.elements import quoted_name

        engine = create_engine("oracle+cx_oracle://some_dsn")
        engine.has_table(quoted_name("some_table", True))

    The above logic will run the "has table" logic against the Oracle backend,
    passing the name exactly as ``"some_table"`` without converting to
    upper case.

    .. versionadded:: 0.9.0

    """

    def __new__(cls, value, quote):
        if value is None:
            return None
        # experimental - don't bother with quoted_name
        # if quote flag is None.  doesn't seem to make any dent
        # in performance however
        # elif not sprcls and quote is None:
        #   return value
        elif isinstance(value, cls) and (
            quote is None or value.quote == quote
        ):
            # already a quoted_name with a compatible quote flag; reuse it
            return value
        self = super(quoted_name, cls).__new__(cls, value)
        self.quote = quote
        return self

    def __reduce__(self):
        # pickle as (plain string, quote flag); __new__ re-wraps on load
        return quoted_name, (util.text_type(self), self.quote)

    @util.memoized_instancemethod
    def lower(self):
        # a name explicitly quoted True is case-preserved, not lowered
        if self.quote:
            return self
        else:
            return util.text_type(self).lower()

    @util.memoized_instancemethod
    def upper(self):
        # a name explicitly quoted True is case-preserved, not uppered
        if self.quote:
            return self
        else:
            return util.text_type(self).upper()

    def __repr__(self):
        backslashed = self.encode('ascii', 'backslashreplace')
        if not util.py2k:
            backslashed = backslashed.decode('ascii')
        return "'%s'" % backslashed
class _truncated_label(quoted_name):
    """A unicode subclass used to identify symbolic names that may
    require truncation."""

    def __new__(cls, value, quote=None):
        # inherit an existing quote preference carried by the value
        quote = getattr(value, "quote", quote)
        # return super(_truncated_label, cls).__new__(cls, value, quote, True)
        return super(_truncated_label, cls).__new__(cls, value, quote)

    def __reduce__(self):
        return self.__class__, (util.text_type(self), self.quote)

    def apply_map(self, map_):
        # no-op here; only _anonymous_label interpolates a map
        return self
class conv(_truncated_label):
    """Mark a string indicating that a name has already been converted
    by a naming convention.

    This is a string subclass that indicates a name that should not be
    subject to any further naming conventions.

    E.g. when we create a :class:`.Constraint` using a naming convention
    as follows::

        m = MetaData(naming_convention={
            "ck": "ck_%(table_name)s_%(constraint_name)s"
        })
        t = Table('t', m, Column('x', Integer),
                  CheckConstraint('x > 5', name='x5'))

    The name of the above constraint will be rendered as ``"ck_t_x5"``.
    That is, the existing name ``x5`` is used in the naming convention as the
    ``constraint_name`` token.

    In some situations, such as in migration scripts, we may be rendering
    the above :class:`.CheckConstraint` with a name that's already been
    converted.  In order to make sure the name isn't double-modified, the
    new name is applied using the :func:`.schema.conv` marker.  We can
    use this explicitly as follows::

        m = MetaData(naming_convention={
            "ck": "ck_%(table_name)s_%(constraint_name)s"
        })
        t = Table('t', m, Column('x', Integer),
                  CheckConstraint('x > 5', name=conv('ck_t_x5')))

    Where above, the :func:`.schema.conv` marker indicates that the constraint
    name here is final, and the name will render as ``"ck_t_x5"`` and not
    ``"ck_t_ck_t_x5"``

    .. versionadded:: 0.9.4

    .. seealso::

        :ref:`constraint_naming_conventions`

    """
class _defer_name(_truncated_label):
    """mark a name as 'deferred' for the purposes of automated name
    generation.

    """

    def __new__(cls, value):
        if value is None:
            # no name supplied at all; use the shared "unnamed" marker
            return _NONE_NAME
        elif isinstance(value, conv):
            # already convention-converted names are passed through as-is
            return value
        else:
            return super(_defer_name, cls).__new__(cls, value)

    def __reduce__(self):
        return self.__class__, (util.text_type(self), )
class _defer_none_name(_defer_name):
    """indicate a 'deferred' name that was ultimately the value None."""


# shared singleton returned by _defer_name(None)
_NONE_NAME = _defer_none_name("_unnamed_")

# for backwards compatibility in case
# someone is re-implementing the
# _truncated_identifier() sequence in a custom
# compiler
_generated_label = _truncated_label
class _anonymous_label(_truncated_label):
    """A unicode subclass used to identify anonymously
    generated names."""

    def __add__(self, other):
        joined = util.text_type.__add__(self, util.text_type(other))
        return _anonymous_label(quoted_name(joined, self.quote))

    def __radd__(self, other):
        joined = util.text_type.__add__(util.text_type(other), self)
        return _anonymous_label(quoted_name(joined, self.quote))

    def apply_map(self, map_):
        if self.quote is None:
            # else skip the constructor call
            return self % map_
        # preserve quoting only if necessary
        return quoted_name(self % map_, self.quote)
def _as_truncated(value):
    """coerce the given value to :class:`._truncated_label`.

    Existing :class:`._truncated_label` and
    :class:`._anonymous_label` objects are passed
    unchanged.
    """
    return value if isinstance(value, _truncated_label) \
        else _truncated_label(value)
def _string_or_unprintable(element):
    """Return *element* if it is already a string, else its ``str()``
    form, falling back to a placeholder when ``str()`` itself raises."""
    if isinstance(element, util.string_types):
        return element
    else:
        try:
            return str(element)
        except Exception:
            # was a bare "except:", which would also swallow
            # KeyboardInterrupt / SystemExit
            return "unprintable element %r" % element
def _expand_cloned(elements):
"""expand the given set of ClauseElements to be the set of all 'cloned'
predecessors.
"""
return itertools.chain(*[x._cloned_set for x in elements])
def _select_iterables(elements):
"""expand tables into individual columns in the
given list of column expressions.
"""
return itertools.chain(*[c._select_iterable for c in elements])
def _cloned_intersection(a, b):
    """return the intersection of sets a and b, counting
    any overlap between 'cloned' predecessors.

    The returned set is in terms of the entities present within 'a'.

    """
    overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
    return {elem for elem in a
            if overlap.intersection(elem._cloned_set)}
def _cloned_difference(a, b):
    """return the elements of 'a' whose cloned predecessors have no
    overlap with those of 'b'."""
    overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
    return {elem for elem in a
            if not overlap.intersection(elem._cloned_set)}
def _labeled(element):
if not hasattr(element, 'name'):
return element.label(None)
else:
return element
def _is_column(col):
    """True if ``col`` is an instance of :class:`.ColumnElement`."""
    # plain isinstance check; see the note below re: inspect() vs. Visitable
    return isinstance(col, ColumnElement)
def _find_columns(clause):
    """locate Column objects within the given expression."""
    cols = util.column_set()
    # the 'column' visitor callback adds every visited column to the set
    traverse(clause, {}, {'column': cols.add})
    return cols
# there is some inconsistency here between the usage of
# inspect() vs. checking for Visitable and __clause_element__.
# Ideally all functions here would derive from inspect(),
# however the inspect() versions add significant callcount
# overhead for critical functions like _interpret_as_column_or_from().
# Generally, the column-based functions are more performance critical
# and are fine just checking for __clause_element__(). It is only
# _interpret_as_from() where we'd like to be able to receive ORM entities
# that have no defined namespace, hence inspect() is needed there.
def _column_as_key(element):
    """Coerce a column-oriented expression into its string key, or None
    when no key is available."""
    if isinstance(element, util.string_types):
        return element
    if hasattr(element, '__clause_element__'):
        element = element.__clause_element__()
    return getattr(element, 'key', None)
def _clause_element_as_expr(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
else:
return element
def _literal_as_text(element):
    """Coerce *element* into a clause element, interpreting plain
    strings as SQL text; raise ArgumentError otherwise."""
    if isinstance(element, Visitable):
        return element
    elif hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    elif isinstance(element, util.string_types):
        # a plain string becomes literal SQL text
        return TextClause(util.text_type(element))
    elif isinstance(element, (util.NoneType, bool)):
        return _const_expr(element)
    else:
        raise exc.ArgumentError(
            "sql expression object or string expected."
        )
def _no_literals(element):
    """Pass through clause elements; reject plain literal values with an
    informative error."""
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    if isinstance(element, Visitable):
        return element
    raise exc.ArgumentError("Ambiguous literal: %r. Use the 'text()' "
                            "function to indicate a sql expression "
                            "literal, or 'literal()' to indicate a "
                            "bound value." % element)
def _is_literal(element):
    """True when *element* is neither a Visitable nor adaptable via
    ``__clause_element__``."""
    return not (isinstance(element, Visitable) or
                hasattr(element, '__clause_element__'))
def _only_column_elements_or_none(element, name):
if element is None:
return None
else:
return _only_column_elements(element, name)
def _only_column_elements(element, name):
    """Unwrap *element* and require that the result is a ColumnElement;
    *name* is the argument name used in the error message."""
    if hasattr(element, '__clause_element__'):
        element = element.__clause_element__()
    if isinstance(element, ColumnElement):
        return element
    raise exc.ArgumentError(
        "Column-based expression object expected for argument "
        "'%s'; got: '%s', type %s" % (name, element, type(element)))
def _literal_as_binds(element, name=None, type_=None):
    """Coerce *element* into a clause element, turning plain values into
    bound parameters (None becomes a Null expression)."""
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    if isinstance(element, Visitable):
        return element
    if element is None:
        return Null()
    return BindParameter(name, element, type_=type_, unique=True)
def _interpret_as_column_or_from(element):
    """Coerce *element* into a column- or FROM-oriented clause element,
    falling back to a literal ColumnClause of its string form."""
    if isinstance(element, Visitable):
        return element
    elif hasattr(element, '__clause_element__'):
        return element.__clause_element__()

    # inspect() is needed here to accept ORM entities; see the module
    # comment above re: inspect() overhead
    insp = inspection.inspect(element, raiseerr=False)
    if insp is None:
        if isinstance(element, (util.NoneType, bool)):
            return _const_expr(element)
    elif hasattr(insp, "selectable"):
        return insp.selectable
    return ColumnClause(str(element), is_literal=True)
def _const_expr(element):
    """Map None/False/True (or existing constant expressions) onto
    Null/False_/True_ expression objects."""
    if isinstance(element, (Null, False_, True_)):
        return element
    if element is None:
        return Null()
    if element is False:
        return False_()
    if element is True:
        return True_()
    raise exc.ArgumentError(
        "Expected None, False, or True"
    )
def _type_from_args(args):
for a in args:
if not a.type._isnull:
return a.type
else:
return type_api.NULLTYPE
def _corresponding_column_or_error(fromclause, column,
require_embedded=False):
c = fromclause.corresponding_column(column,
require_embedded=require_embedded)
if c is None:
raise exc.InvalidRequestError(
"Given column '%s', attached to table '%s', "
"failed to locate a corresponding column from table '%s'"
%
(column,
getattr(column, 'table', None),
fromclause.description)
)
return c
class AnnotatedColumnElement(Annotated):
    """Annotated wrapper for ColumnElement objects; re-pulls selected
    attributes lazily from the wrapped element."""

    def __init__(self, element, values):
        Annotated.__init__(self, element, values)
        # NOTE(review): presumably resets the per-object memoized
        # comparator so this wrapper gets its own -- confirm
        ColumnElement.comparator._reset(self)
        # drop attributes that copied over as None so the memoized
        # properties below can fetch them from the element instead
        for attr in ('name', 'key', 'table'):
            if self.__dict__.get(attr, False) is None:
                self.__dict__.pop(attr)

    def _with_annotations(self, values):
        clone = super(AnnotatedColumnElement, self)._with_annotations(values)
        ColumnElement.comparator._reset(clone)
        return clone

    @util.memoized_property
    def name(self):
        """pull 'name' from parent, if not present"""
        return self._Annotated__element.name

    @util.memoized_property
    def table(self):
        """pull 'table' from parent, if not present"""
        return self._Annotated__element.table

    @util.memoized_property
    def key(self):
        """pull 'key' from parent, if not present"""
        return self._Annotated__element.key

    @util.memoized_property
    def info(self):
        # proxied straight from the wrapped element
        return self._Annotated__element.info

    @util.memoized_property
    def anon_label(self):
        # proxied straight from the wrapped element
        return self._Annotated__element.anon_label
| ncdesouza/bookworm | env/lib/python2.7/site-packages/sqlalchemy/sql/elements.py | Python | gpl-3.0 | 121,469 | [
"VisIt"
] | d64dd26b0e978fd25c515d718e7f5bd2b4424182e5dbfb967c7d5b56152e42d8 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:synopsis: Prompts the user to add network paths and username passwords for
e.g. smb paths
"""
from __future__ import absolute_import, division, unicode_literals
from logging import getLogger
import re
import socket
import urllib
import xbmc
from .. import path_ops, utils
LOG = getLogger('PLEX.direct_path_sources')

# network protocols this module knows how to write into sources.xml
SUPPORTED_PROTOCOLS = ('smb', 'nfs', 'http', 'https', 'ftp', 'sftp')
# absolute path of the Kodi userdata folder holding sources.xml/passwords.xml
PATH = path_ops.translate_path('special://userdata/')
def get_etree(topelement):
    """Return the root etree element of ``<topelement>.xml`` in the Kodi
    userdata folder.  Creates a fresh empty root when the file is missing
    or blank; returns None (after notifying the user) when the file
    cannot be parsed."""
    try:
        xml = utils.defused_etree.parse(
            path_ops.path.join(PATH, '%s.xml' % topelement))
    except IOError:
        # Document is blank or missing
        LOG.info('%s.xml is missing or blank, creating it', topelement)
        root = utils.etree.Element(topelement)
    except utils.ParseError:
        LOG.error('Error parsing %s', topelement)
        # "Kodi cannot parse {0}. PKC will not function correctly. Please visit
        # {1} and correct your file!"
        utils.messageDialog(utils.lang(29999), utils.lang(39716).format(
            '%s.xml' % topelement, 'http://forum.kodi.tv/'))
        return
    else:
        root = xml.getroot()
    return root
def is_valid_hostname(hostname):
    """Return True if *hostname* is a syntactically valid hostname:
    non-empty, at most 255 characters, made of 1-63 character labels of
    letters/digits/hyphens that do not start or end with a hyphen."""
    # reject empty input up front -- the original indexed hostname[-1]
    # and raised IndexError on ""
    if not hostname or len(hostname) > 255:
        return False
    if hostname.endswith("."):
        # strip exactly one dot from the right, if present (FQDN form)
        hostname = hostname[:-1]
    allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
    return all(allowed.match(label) for label in hostname.split("."))
def is_valid_ip(ip):
    """Return True if *ip* parses as an IPv4 address."""
    try:
        socket.inet_aton(ip)
    except socket.error:
        return False
    else:
        # legal
        return True
def start():
    """
    Hit this function to start entering network credentials.

    Prompts the user for hostname, protocol, username and password,
    validates the input, and writes the results into Kodi's sources.xml
    and passwords.xml.  Returns early (None) whenever the user aborts a
    dialog, the input is invalid, or an XML file cannot be parsed.
    """
    LOG.info('Editing sources.xml and passwords.xml')
    # Fix for:
    # DEBUG: Activating window ID: 13000
    # INFO: Activate of window '13000' refused because there are active modal dialogs
    # DEBUG: Activating window ID: 13000
    xbmc.executebuiltin("Dialog.Close(all, true)")
    # "In the following window, enter the server's hostname (or IP) where your
    # Plex media resides. Mind the case!"
    utils.messageDialog(utils.lang(29999), utils.lang(30200))
    # "Enter server hostname (or IP)"
    hostname = utils.dialog('input', utils.lang(30201))
    if not hostname:
        return
    hostname = hostname.decode('utf-8').strip()
    if not is_valid_hostname(hostname) and not is_valid_ip(hostname):
        LOG.error('Entered invalid hostname or IP: %s', hostname)
        # "The hostname or IP '{0}' that you entered is not valid"
        utils.messageDialog(utils.lang(29999),
                            utils.lang(30204).format(hostname))
        return
    # "In the following window, enter the network protocol you would like to
    # use. This is likely 'smb'."
    utils.messageDialog(utils.lang(29999), utils.lang(30202))
    # "Enter network protocol"
    protocol = utils.dialog('input', utils.lang(30203))
    if not protocol:
        return
    protocol = protocol.decode('utf-8').lower().strip()
    if protocol not in SUPPORTED_PROTOCOLS:
        LOG.error('Entered invalid protocol %s', protocol)
        # "The protocol '{0}' that you entered is not supported."
        utils.messageDialog(utils.lang(29999),
                            utils.lang(30205).format(protocol))
        return
    path = '%s://%s' % (protocol, hostname)
    # Trailing slash at the end
    paths = (path, '%s/' % path)
    # Add hostname to sources.xml, if not already there
    LOG.info('Hostname we are adding to sources.xml and passwords.xml: %s',
             path)
    try:
        with utils.XmlKodiSetting('sources.xml',
                                  force_create=True,
                                  top_element='sources') as xml:
            files = xml.root.find('files')
            if files is None:
                files = utils.etree.SubElement(xml.root, 'files')
                utils.etree.SubElement(files,
                                       'default',
                                       attrib={'pathversion': '1'})
            for source in files:
                entry = source.find('path')
                if entry is None:
                    LOG.debug('Entry is None')
                    continue
                LOG.debug('entry found: %s', entry.text)
                if entry.text in paths:
                    LOG.debug('Already have %s in sources.xml', path)
                    break
            else:
                # Need to add an element for our hostname
                LOG.debug('Adding subelement to sources.xml for %s', hostname)
                source = utils.etree.SubElement(files, 'source')
                utils.etree.SubElement(source, 'name').text = 'PKC %s' % hostname
                utils.etree.SubElement(source,
                                       'path',
                                       attrib={'pathversion': '1'}).text = '%s/' % path
                utils.etree.SubElement(source, 'allowsharing').text = 'false'
                xml.write_xml = True
    except utils.ParseError:
        return
    # Add or change username and password in passwords.xml
    try:
        with utils.XmlKodiSetting('passwords.xml',
                                  force_create=True,
                                  top_element='passwords') as xml:
            # drop any stale entry for this path before re-adding it
            for entry in xml.root:
                source = entry.find('from')
                if source is None:
                    continue
                if source.text in paths:
                    LOG.debug('Found an existing passwords.xml entry for %s, '
                              'replacing it',
                              path)
                    xml.root.remove(entry)
            entry = utils.etree.SubElement(xml.root, 'path')
            # "Username"
            user = utils.dialog('input', utils.lang(1014))
            if user is None:
                xml.write_xml = False
                return
            user = user.strip()
            user = urllib.quote(user)
            user = user.decode('utf-8')
            # "Password"
            # May also be blank!! (=user aborts dialog)
            password = utils.dialog('input',
                                    utils.lang(733),
                                    '',
                                    type='{alphanum}',
                                    option='{hide}')
            password = urllib.quote(password)
            password = password.decode('utf-8')
            utils.etree.SubElement(entry,
                                   'from',
                                   attrib={'pathversion': '1'}).text = '%s/' % path
            login = '%s://%s:%s@%s/' % (protocol, user, password, hostname)
            utils.etree.SubElement(entry,
                                   'to',
                                   attrib={'pathversion': '1'}).text = login
            xml.write_xml = True
    except utils.ParseError:
        return
    # fixed typo in the original log message ("padsswords.xml")
    LOG.info('Successfully completed editing sources.xml and passwords.xml')
"VisIt"
] | 1c60ab32c1cac3bd6134f5be3172420eaaa5c3feebe5f7a4305759414287ff20 |
#!/usr/bin/env python
#
# Copyright (c) 2001 Autonomous Zone Industries
# This file is licensed under the
# GNU Lesser General Public License v2.1.
# See the file COPYING or visit http://www.gnu.org/ for details.
#
# $Id: std.py,v 1.1 2002/01/29 20:07:05 zooko Exp $
# This is a silly hack to get around Python's circular import limitation.
# See, standard modules like "debug", "humaneadable" and "idlib" all need to import each other, but should be in separate modules.
# (The loathsome `mojostd.py' module, pronounced "mojostupid", is an earlier klooge for the same purpose.)
# The `std.py' klooge is that each of those modules imports std at module import time, but then pokes a reference to its standard
# features *into* std so that other people can use those features (at runtime), with e.g. `std.hr()', `std.mojolog', `std.is_sloppy_id()', etc.
| zooko/egtp | common/std.py | Python | agpl-3.0 | 862 | [
"VisIt"
] | 79b78b5b3b0e30e5d1598031390b1ba2304a8d22e60821fc664a47649ec4a2ca |
""" Testing the FCConditionPaserClass
"""
import unittest
from DIRAC.Resources.Catalog.FCConditionParser import FCConditionParser
__RCSID__ = "$Id $"
class TestLogicEvaluation( unittest.TestCase ):
  """ Tests all the logic evaluation of FCConditionParser.

      The original file repeated the same call-and-check pattern for
      every condition; that boilerplate is factored into the private
      _assertEvaluatesTo helper.  Test method names and covered
      conditions are unchanged.
  """

  def setUp( self ):
    self.fcp = FCConditionParser()
    self.lfns = ['/lhcb/lfn1', '/lhcb/lfn2']

  def _assertEvaluatesTo( self, condition, expected ):
    """ Evaluate `condition` for all lfns and check that every lfn is
        flagged truthy (expected=True) or falsy (expected=False) in the
        Successful dict.
    """
    res = self.fcp( 'catalogName', 'operationName', self.lfns, condition = condition )
    self.assertTrue( res['OK'], res )
    for lfn in self.lfns:
      if expected:
        self.assertTrue( res['Value']['Successful'][lfn], res )
      else:
        self.assertTrue( not res['Value']['Successful'][lfn], res )

  def test_01_simpleParse( self ):
    """Test the parse of a single plugin"""
    self._assertEvaluatesTo( "Dummy=True", True )
    self._assertEvaluatesTo( "Dummy=False", False )

  def test_02_notLogic( self ):
    """Testing the ! operator"""
    self._assertEvaluatesTo( "!Dummy=True", False )
    self._assertEvaluatesTo( "!Dummy=False", True )

  def test_03_andLogic( self ):
    """Testing the & operator"""
    self._assertEvaluatesTo( "Dummy=True & Dummy=True", True )
    self._assertEvaluatesTo( "Dummy=False & Dummy=True", False )
    self._assertEvaluatesTo( "Dummy=True & Dummy=False", False )

  def test_04_orLogic( self ):
    """Testing the | operator"""
    self._assertEvaluatesTo( "Dummy=True | Dummy=True", True )
    self._assertEvaluatesTo( "Dummy=False | Dummy=True", True )
    self._assertEvaluatesTo( "Dummy=False | Dummy=False", False )

  def test_05_priority( self ):
    """Testing the priority of operators and brackets"""
    self._assertEvaluatesTo( "!Dummy=False & Dummy=True", True )
    self._assertEvaluatesTo( "!Dummy=True | Dummy=False", False )
    self._assertEvaluatesTo( "Dummy=True & Dummy=False | Dummy=True", True )
    self._assertEvaluatesTo( "Dummy=True | Dummy=False & Dummy=True", True )
    self._assertEvaluatesTo( "!Dummy=True | Dummy=False & Dummy=True", False )
    self._assertEvaluatesTo( "!Dummy=True | !Dummy=False & Dummy=True", True )
    self._assertEvaluatesTo( "!Dummy=True | !Dummy=False & !Dummy=True", False )
    self._assertEvaluatesTo( "[!Dummy=False] & Dummy=True", True )
    self._assertEvaluatesTo( "![Dummy=False] & Dummy=True", True )
    self._assertEvaluatesTo( "![Dummy=False & Dummy=True]", True )
    self._assertEvaluatesTo( "[Dummy=True | Dummy=False] & Dummy=True", True )
    self._assertEvaluatesTo( "Dummy=True | [Dummy=False & Dummy=True]", True )
    self._assertEvaluatesTo( "Dummy=False | [Dummy=False & Dummy=True]", False )

  def test_06_errors( self ):
    """Testing different error situations"""
    # Error in the plugin
    self._assertEvaluatesTo( "Dummy=CantParse", False )
    # Non existing plugin
    self._assertEvaluatesTo( "NonExistingPlugin=something", False )
    # Error in the grammar
    self._assertEvaluatesTo( "[Dummy=True", False )

  def test_07_noCondition( self ):
    """Testing absent conditions"""
    # No condition given
    self._assertEvaluatesTo( "", True )
    # Can't retrieve conditions; it so happens that it will all be True
    self._assertEvaluatesTo( None, True )
if __name__ == '__main__':
  # run the full suite with verbose output when executed as a script
  suite = unittest.defaultTestLoader.loadTestsFromTestCase( TestLogicEvaluation )
  unittest.TextTestRunner( verbosity = 2 ).run( suite )
| andresailer/DIRAC | Resources/Catalog/test/Test_FCConditionParser.py | Python | gpl-3.0 | 9,153 | [
"DIRAC"
] | 2313d3183d37fbad954ae23318452057e5a9ede23210558ec8299621505d98fe |
"""
font data tables for truetype and afm computer modern fonts
"""
# this dict maps symbol names to fontnames, glyphindex. To get the
# glyph index from the character code, you have to use get_charmap
"""
from matplotlib.ft2font import FT2Font
font = FT2Font('/usr/local/share/matplotlib/cmr10.ttf')
items = font.get_charmap().items()
items.sort()
for charcode, glyphind in items:
print charcode, glyphind
"""
# Maps TeX symbol names to (BaKoMa font name, glyph index) pairs;
# see the module comment above for how the indices were generated.
latex_to_bakoma = {
    r'\oint'                     : ('cmex10',  45),
    r'\bigodot'                  : ('cmex10',  50),
    r'\bigoplus'                 : ('cmex10',  55),
    r'\bigotimes'                : ('cmex10',  59),
    r'\sum'                      : ('cmex10',  51),
    r'\prod'                     : ('cmex10',  24),
    r'\int'                      : ('cmex10',  56),
    r'\bigcup'                   : ('cmex10',  28),
    r'\bigcap'                   : ('cmex10',  60),
    r'\biguplus'                 : ('cmex10',  32),
    r'\bigwedge'                 : ('cmex10',   4),
    r'\bigvee'                   : ('cmex10',  37),
    r'\coprod'                   : ('cmex10',  42),
    r'\__sqrt__'                 : ('cmex10',  48),
    r'\leftbrace'                : ('cmex10',  92),
    r'{'                         : ('cmex10',  92),
    r'\{'                        : ('cmex10',  92),
    r'\rightbrace'               : ('cmex10', 130),
    r'}'                         : ('cmex10', 130),
    r'\}'                        : ('cmex10', 130),
    r'\leftangle'                : ('cmex10',  97),
    r'\rightangle'               : ('cmex10',  64),
    r'\langle'                   : ('cmex10',  97),
    r'\rangle'                   : ('cmex10',  64),
    r'\widehat'                  : ('cmex10',  15),
    r'\widetilde'                : ('cmex10',  52),
    r'\widebar'                  : ('cmr10',  131),

    r'\omega'                    : ('cmmi10',  29),
    r'\varepsilon'               : ('cmmi10',  20),
    r'\vartheta'                 : ('cmmi10',  22),
    r'\varrho'                   : ('cmmi10',  61),
    r'\varsigma'                 : ('cmmi10',  41),
    r'\varphi'                   : ('cmmi10',   6),
    r'\leftharpoonup'            : ('cmmi10', 108),
    r'\leftharpoondown'          : ('cmmi10',  68),
    r'\rightharpoonup'           : ('cmmi10', 117),
    r'\rightharpoondown'         : ('cmmi10',  77),
    r'\triangleright'            : ('cmmi10', 130),
    r'\triangleleft'             : ('cmmi10',  89),
    r'.'                         : ('cmmi10',  51),
    r','                         : ('cmmi10',  44),
    r'<'                         : ('cmmi10',  99),
    r'/'                         : ('cmmi10',  98),
    r'>'                         : ('cmmi10', 107),
    r'\flat'                     : ('cmmi10', 131),
    r'\natural'                  : ('cmmi10',  90),
    r'\sharp'                    : ('cmmi10',  50),
    r'\smile'                    : ('cmmi10',  97),
    r'\frown'                    : ('cmmi10',  58),
    r'\ell'                      : ('cmmi10', 102),
    r'\imath'                    : ('cmmi10',   8),
    r'\jmath'                    : ('cmmi10',  65),
    r'\wp'                       : ('cmmi10',  14),
    r'\alpha'                    : ('cmmi10',  13),
    r'\beta'                     : ('cmmi10',  35),
    r'\gamma'                    : ('cmmi10',  24),
    r'\delta'                    : ('cmmi10',  38),
    r'\epsilon'                  : ('cmmi10',  54),
    r'\zeta'                     : ('cmmi10',  10),
    r'\eta'                      : ('cmmi10',   5),
    r'\theta'                    : ('cmmi10',  18),
    r'\iota'                     : ('cmmi10',  28),
    r'\lambda'                   : ('cmmi10',   9),
    r'\mu'                       : ('cmmi10',  32),
    r'\nu'                       : ('cmmi10',  34),
    r'\xi'                       : ('cmmi10',   7),
    r'\pi'                       : ('cmmi10',  36),
    r'\kappa'                    : ('cmmi10',  30),
    r'\rho'                      : ('cmmi10',  39),
    r'\sigma'                    : ('cmmi10',  21),
    r'\tau'                      : ('cmmi10',  43),
    r'\upsilon'                  : ('cmmi10',  25),
    r'\phi'                      : ('cmmi10',  42),
    r'\chi'                      : ('cmmi10',  17),
    r'\psi'                      : ('cmmi10',  31),

    r'|'                         : ('cmsy10',  47),
    r'\|'                        : ('cmsy10',  47),
    r'('                         : ('cmr10',  119),
    r'\leftparen'                : ('cmr10',  119),
    r'\rightparen'               : ('cmr10',   68),
    r')'                         : ('cmr10',   68),
    r'+'                         : ('cmr10',   76),
    r'0'                         : ('cmr10',   40),
    r'1'                         : ('cmr10',  100),
    r'2'                         : ('cmr10',   49),
    r'3'                         : ('cmr10',  110),
    r'4'                         : ('cmr10',   59),
    r'5'                         : ('cmr10',  120),
    r'6'                         : ('cmr10',   69),
    r'7'                         : ('cmr10',  127),
    r'8'                         : ('cmr10',   77),
    r'9'                         : ('cmr10',   22),
    r':'                         : ('cmr10',   85),
    r';'                         : ('cmr10',   31),
    r'='                         : ('cmr10',   41),
    r'\leftbracket'              : ('cmr10',   62),
    r'['                         : ('cmr10',   62),
    r'\rightbracket'             : ('cmr10',   72),
    r']'                         : ('cmr10',   72),
    r'\%'                        : ('cmr10',   48),
    r'%'                         : ('cmr10',   48),
    r'\$'                        : ('cmr10',   99),
    r'@'                         : ('cmr10',  111),
    r'\#'                        : ('cmr10',   39),
    r'\_'                        : ('cmtt10',  79),
    r'\Gamma'                    : ('cmr10',   19),
    r'\Delta'                    : ('cmr10',    6),
    r'\Theta'                    : ('cmr10',    7),
    r'\Lambda'                   : ('cmr10',   14),
    r'\Xi'                       : ('cmr10',    3),
    r'\Pi'                       : ('cmr10',   17),
    r'\Sigma'                    : ('cmr10',   10),
    r'\Upsilon'                  : ('cmr10',   11),
    r'\Phi'                      : ('cmr10',    9),
    r'\Psi'                      : ('cmr10',   15),
    r'\Omega'                    : ('cmr10',   12),

    r'\prime'                    : ('cmsy10',  73),

    # these are mathml names, I think.  I'm just using them for the
    # tex methods noted
    r'\circumflexaccent'         : ('cmr10',  124),  # for \hat
    r'\combiningbreve'           : ('cmr10',   81),  # for \breve
    r'\combiningoverline'        : ('cmr10',  131),  # for \bar
    r'\combininggraveaccent'     : ('cmr10',  114),  # for \grave
    r'\combiningacuteaccent'     : ('cmr10',   63),  # for \accute
    r'\combiningdiaeresis'       : ('cmr10',   91),  # for \ddot
    r'\combiningtilde'           : ('cmr10',   75),  # for \tilde
    r'\combiningrightarrowabove' : ('cmmi10', 110),  # for \vec
    r'\combiningdotabove'        : ('cmr10',   26),  # for \dot

    r'\leftarrow'                : ('cmsy10',  10),
    r'\uparrow'                  : ('cmsy10',  25),
    r'\downarrow'                : ('cmsy10',  28),
    r'\leftrightarrow'           : ('cmsy10',  24),
    r'\nearrow'                  : ('cmsy10',  99),
    r'\searrow'                  : ('cmsy10',  57),
    r'\simeq'                    : ('cmsy10', 108),
    r'\Leftarrow'                : ('cmsy10', 104),
    r'\Rightarrow'               : ('cmsy10', 112),
    r'\Uparrow'                  : ('cmsy10',  60),
    r'\Downarrow'                : ('cmsy10',  68),
    r'\Leftrightarrow'           : ('cmsy10',  51),
    r'\nwarrow'                  : ('cmsy10',  65),
    r'\swarrow'                  : ('cmsy10', 116),
    r'\propto'                   : ('cmsy10',  15),
    r'\infty'                    : ('cmsy10',  32),
    r'\in'                       : ('cmsy10',  59),
    r'\ni'                       : ('cmsy10', 122),
    r'\bigtriangleup'            : ('cmsy10',  80),
    r'\bigtriangledown'          : ('cmsy10', 132),
    r'\slash'                    : ('cmsy10',  87),
    r'\forall'                   : ('cmsy10',  21),
    r'\exists'                   : ('cmsy10',   5),
    r'\neg'                      : ('cmsy10',  20),
    r'\emptyset'                 : ('cmsy10',  33),
    r'\Re'                       : ('cmsy10',  95),
    r'\Im'                       : ('cmsy10',  52),
    r'\top'                      : ('cmsy10', 100),
    r'\bot'                      : ('cmsy10',  11),
    r'\aleph'                    : ('cmsy10',  26),
    r'\cup'                      : ('cmsy10',   6),
    r'\cap'                      : ('cmsy10',  19),
    r'\uplus'                    : ('cmsy10',  58),
    r'\wedge'                    : ('cmsy10',  43),
    r'\vee'                      : ('cmsy10',  96),
    r'\vdash'                    : ('cmsy10', 109),
    r'\dashv'                    : ('cmsy10',  66),
    r'\lfloor'                   : ('cmsy10', 117),
    r'\rfloor'                   : ('cmsy10',  74),
    r'\lceil'                    : ('cmsy10', 123),
    r'\rceil'                    : ('cmsy10',  81),
    r'\lbrace'                   : ('cmsy10',  92),
    r'\rbrace'                   : ('cmsy10', 105),
    r'\mid'                      : ('cmsy10',  47),
    r'\vert'                     : ('cmsy10',  47),
    r'\Vert'                     : ('cmsy10',  44),
    r'\updownarrow'              : ('cmsy10',  94),
    r'\Updownarrow'              : ('cmsy10',  53),
    r'\backslash'                : ('cmsy10', 126),
    r'\wr'                       : ('cmsy10', 101),
    r'\nabla'                    : ('cmsy10', 110),
    r'\sqcup'                    : ('cmsy10',  67),
    r'\sqcap'                    : ('cmsy10', 118),
    r'\sqsubseteq'               : ('cmsy10',  75),
    r'\sqsupseteq'               : ('cmsy10', 124),
    r'\S'                        : ('cmsy10', 129),
    r'\dag'                      : ('cmsy10',  71),
    r'\ddag'                     : ('cmsy10', 127),
    r'\P'                        : ('cmsy10', 130),
    r'\clubsuit'                 : ('cmsy10',  18),
    r'\diamondsuit'              : ('cmsy10',  34),
    r'\heartsuit'                : ('cmsy10',  22),
    r'-'                         : ('cmsy10',  17),
    r'\cdot'                     : ('cmsy10',  78),
    r'\times'                    : ('cmsy10',  13),
    r'*'                         : ('cmsy10',   9),
    r'\ast'                      : ('cmsy10',   9),
    r'\div'                      : ('cmsy10',  31),
    r'\diamond'                  : ('cmsy10',  48),
    r'\pm'                       : ('cmsy10',   8),
    r'\mp'                       : ('cmsy10',  98),
    r'\oplus'                    : ('cmsy10',  16),
    r'\ominus'                   : ('cmsy10',  56),
    r'\otimes'                   : ('cmsy10',  30),
    r'\oslash'                   : ('cmsy10', 107),
    r'\odot'                     : ('cmsy10',  64),
    r'\bigcirc'                  : ('cmsy10', 115),
    r'\circ'                     : ('cmsy10',  72),
    r'\bullet'                   : ('cmsy10',  84),
    r'\asymp'                    : ('cmsy10', 121),
    r'\equiv'                    : ('cmsy10',  35),
    r'\subseteq'                 : ('cmsy10', 103),
    r'\supseteq'                 : ('cmsy10',  42),
    r'\leq'                      : ('cmsy10',  14),
    r'\geq'                      : ('cmsy10',  29),
    r'\preceq'                   : ('cmsy10',  79),
    r'\succeq'                   : ('cmsy10', 131),
    r'\sim'                      : ('cmsy10',  27),
    r'\approx'                   : ('cmsy10',  23),
    r'\subset'                   : ('cmsy10',  50),
    r'\supset'                   : ('cmsy10',  86),
    r'\ll'                       : ('cmsy10',  85),
    r'\gg'                       : ('cmsy10',  40),
    r'\prec'                     : ('cmsy10',  93),
    r'\succ'                     : ('cmsy10',  49),
    r'\rightarrow'               : ('cmsy10',  12),
    r'\to'                       : ('cmsy10',  12),
    r'\spadesuit'                : ('cmsy10',   7),

    r'?'                         : ('cmr10',   50),
    r'!'                         : ('cmr10',   29),
    r'&'                         : ('cmr10',  109)
}
# TeX names mapped directly onto glyph numbers within the cmex font
# (big operators and delimiters).
latex_to_cmex = {
    r'\__sqrt__'   : 112,
    r'\bigcap'     : 92,
    r'\bigcup'     : 91,
    r'\bigodot'    : 75,
    r'\bigoplus'   : 77,
    r'\bigotimes'  : 79,
    r'\biguplus'   : 93,
    r'\bigvee'     : 95,
    r'\bigwedge'   : 94,
    r'\coprod'     : 97,
    r'\int'        : 90,
    r'\leftangle'  : 173,
    r'\leftbrace'  : 169,
    r'\oint'       : 73,
    r'\prod'       : 89,
    r'\rightangle' : 174,
    r'\rightbrace' : 170,
    r'\sum'        : 88,
    r'\widehat'    : 98,
    r'\widetilde'  : 101,
}
# Maps TeX symbol names to (standard PostScript font name, character
# code).  Some character codes are written as Python 2 octal literals
# (leading 0, e.g. 0136).
latex_to_standard = {
    r'\cong'                     : ('psyr', 64),
    r'\Delta'                    : ('psyr', 68),
    r'\Phi'                      : ('psyr', 70),
    r'\Gamma'                    : ('psyr', 89),
    r'\alpha'                    : ('psyr', 97),
    r'\beta'                     : ('psyr', 98),
    r'\chi'                      : ('psyr', 99),
    r'\delta'                    : ('psyr', 100),
    r'\varepsilon'               : ('psyr', 101),
    r'\phi'                      : ('psyr', 102),
    r'\gamma'                    : ('psyr', 103),
    r'\eta'                      : ('psyr', 104),
    r'\iota'                     : ('psyr', 105),
    r'\varpsi'                   : ('psyr', 106),
    r'\kappa'                    : ('psyr', 108),
    r'\nu'                       : ('psyr', 110),
    r'\pi'                       : ('psyr', 112),
    r'\theta'                    : ('psyr', 113),
    r'\rho'                      : ('psyr', 114),
    r'\sigma'                    : ('psyr', 115),
    r'\tau'                      : ('psyr', 116),
    r'\upsilon'                  : ('psyr', 117),
    r'\varpi'                    : ('psyr', 118),
    r'\omega'                    : ('psyr', 119),
    r'\xi'                       : ('psyr', 120),
    r'\psi'                      : ('psyr', 121),
    r'\zeta'                     : ('psyr', 122),
    r'\sim'                      : ('psyr', 126),
    r'\leq'                      : ('psyr', 163),
    r'\infty'                    : ('psyr', 165),
    r'\clubsuit'                 : ('psyr', 167),
    r'\diamondsuit'              : ('psyr', 168),
    r'\heartsuit'                : ('psyr', 169),
    r'\spadesuit'                : ('psyr', 170),
    r'\leftrightarrow'           : ('psyr', 171),
    r'\leftarrow'                : ('psyr', 172),
    r'\uparrow'                  : ('psyr', 173),
    r'\rightarrow'               : ('psyr', 174),
    r'\downarrow'                : ('psyr', 175),
    r'\pm'                       : ('psyr', 176),
    r'\geq'                      : ('psyr', 179),
    r'\times'                    : ('psyr', 180),
    r'\propto'                   : ('psyr', 181),
    r'\partial'                  : ('psyr', 182),
    r'\bullet'                   : ('psyr', 183),
    r'\div'                      : ('psyr', 184),
    r'\neq'                      : ('psyr', 185),
    r'\equiv'                    : ('psyr', 186),
    r'\approx'                   : ('psyr', 187),
    r'\ldots'                    : ('psyr', 188),
    r'\aleph'                    : ('psyr', 192),
    r'\Im'                       : ('psyr', 193),
    r'\Re'                       : ('psyr', 194),
    r'\wp'                       : ('psyr', 195),
    r'\otimes'                   : ('psyr', 196),
    r'\oplus'                    : ('psyr', 197),
    r'\oslash'                   : ('psyr', 198),
    r'\cap'                      : ('psyr', 199),
    r'\cup'                      : ('psyr', 200),
    r'\supset'                   : ('psyr', 201),
    r'\supseteq'                 : ('psyr', 202),
    r'\subset'                   : ('psyr', 204),
    r'\subseteq'                 : ('psyr', 205),
    r'\in'                       : ('psyr', 206),
    r'\notin'                    : ('psyr', 207),
    r'\angle'                    : ('psyr', 208),
    r'\nabla'                    : ('psyr', 209),
    r'\textregistered'           : ('psyr', 210),
    r'\copyright'                : ('psyr', 211),
    r'\texttrademark'            : ('psyr', 212),
    r'\Pi'                       : ('psyr', 213),
    r'\prod'                     : ('psyr', 213),
    r'\surd'                     : ('psyr', 214),
    r'\__sqrt__'                 : ('psyr', 214),
    r'\cdot'                     : ('psyr', 215),
    r'\urcorner'                 : ('psyr', 216),
    r'\vee'                      : ('psyr', 217),
    r'\wedge'                    : ('psyr', 218),
    r'\Leftrightarrow'           : ('psyr', 219),
    r'\Leftarrow'                : ('psyr', 220),
    r'\Uparrow'                  : ('psyr', 221),
    r'\Rightarrow'               : ('psyr', 222),
    r'\Downarrow'                : ('psyr', 223),
    r'\Diamond'                  : ('psyr', 224),
    r'\langle'                   : ('psyr', 225),
    r'\Sigma'                    : ('psyr', 229),
    r'\sum'                      : ('psyr', 229),
    r'\forall'                   : ('psyr', 34),
    r'\exists'                   : ('psyr', 36),
    r'\lceil'                    : ('psyr', 233),
    r'\lbrace'                   : ('psyr', 123),
    r'\Psi'                      : ('psyr', 89),
    r'\bot'                      : ('psyr', 0136),
    r'\Omega'                    : ('psyr', 0127),
    r'\leftbracket'              : ('psyr', 0133),
    r'\rightbracket'             : ('psyr', 0135),
    r'\leftbrace'                : ('psyr', 123),
    r'\leftparen'                : ('psyr', 050),
    r'\prime'                    : ('psyr', 0242),
    r'\sharp'                    : ('psyr', 043),
    r'\slash'                    : ('psyr', 057),
    r'\Lamda'                    : ('psyr', 0114),
    r'\neg'                      : ('psyr', 0330),
    r'\Upsilon'                  : ('psyr', 0241),
    r'\rightbrace'               : ('psyr', 0175),
    r'\rfloor'                   : ('psyr', 0373),
    r'\lambda'                   : ('psyr', 0154),
    r'\to'                       : ('psyr', 0256),
    r'\Xi'                       : ('psyr', 0130),
    r'\emptyset'                 : ('psyr', 0306),
    r'\lfloor'                   : ('psyr', 0353),
    r'\rightparen'               : ('psyr', 051),
    r'\rceil'                    : ('psyr', 0371),
    r'\ni'                       : ('psyr', 047),
    r'\epsilon'                  : ('psyr', 0145),
    r'\Theta'                    : ('psyr', 0121),
    r'\langle'                   : ('psyr', 0341),
    r'\leftangle'                : ('psyr', 0341),
    r'\rangle'                   : ('psyr', 0361),
    r'\rightangle'               : ('psyr', 0361),
    r'\rbrace'                   : ('psyr', 0175),
    r'\circ'                     : ('psyr', 0260),
    r'\diamond'                  : ('psyr', 0340),
    r'\mu'                       : ('psyr', 0155),
    r'\mid'                      : ('psyr', 0352),
    r'\imath'                    : ('pncri8a', 105),
    r'\%'                        : ('pncr8a',  37),
    r'\$'                        : ('pncr8a',  36),
    r'\{'                        : ('pncr8a', 123),
    r'\}'                        : ('pncr8a', 125),
    r'\backslash'                : ('pncr8a',  92),
    r'\ast'                      : ('pncr8a',  42),
    r'\#'                        : ('pncr8a',  35),

    r'\circumflexaccent'         : ('pncri8a', 124),  # for \hat
    r'\combiningbreve'           : ('pncri8a',  81),  # for \breve
    r'\combininggraveaccent'     : ('pncri8a', 114),  # for \grave
    r'\combiningacuteaccent'     : ('pncri8a',  63),  # for \accute
    r'\combiningdiaeresis'       : ('pncri8a',  91),  # for \ddot
    r'\combiningtilde'           : ('pncri8a',  75),  # for \tilde
    r'\combiningrightarrowabove' : ('pncri8a', 110),  # for \vec
    r'\combiningdotabove'        : ('pncri8a',  26),  # for \dot
}
# Automatically generated table mapping Type 1 / Adobe glyph names
# (including the 'uniXXXX' form) to Unicode code points. Do not edit
# by hand; regenerate instead.
type12uni = {
'uni24C8' : 9416,
'aring' : 229,
'uni22A0' : 8864,
'uni2292' : 8850,
'quotedblright' : 8221,
'uni03D2' : 978,
'uni2215' : 8725,
'uni03D0' : 976,
'V' : 86,
'dollar' : 36,
'uni301E' : 12318,
'uni03D5' : 981,
'four' : 52,
'uni25A0' : 9632,
'uni013C' : 316,
'uni013B' : 315,
'uni013E' : 318,
'Yacute' : 221,
'uni25DE' : 9694,
'uni013F' : 319,
'uni255A' : 9562,
'uni2606' : 9734,
'uni0180' : 384,
'uni22B7' : 8887,
'uni044F' : 1103,
'uni22B5' : 8885,
'uni22B4' : 8884,
'uni22AE' : 8878,
'uni22B2' : 8882,
'uni22B1' : 8881,
'uni22B0' : 8880,
'uni25CD' : 9677,
'uni03CE' : 974,
'uni03CD' : 973,
'uni03CC' : 972,
'uni03CB' : 971,
'uni03CA' : 970,
'uni22B8' : 8888,
'uni22C9' : 8905,
'uni0449' : 1097,
'uni20DD' : 8413,
'uni20DC' : 8412,
'uni20DB' : 8411,
'uni2231' : 8753,
'uni25CF' : 9679,
'uni306E' : 12398,
'uni03D1' : 977,
'uni01A1' : 417,
'uni20D7' : 8407,
'uni03D6' : 982,
'uni2233' : 8755,
'uni20D2' : 8402,
'uni20D1' : 8401,
'uni20D0' : 8400,
'P' : 80,
'uni22BE' : 8894,
'uni22BD' : 8893,
'uni22BC' : 8892,
'uni22BB' : 8891,
'underscore' : 95,
'uni03C8' : 968,
'uni03C7' : 967,
'uni0328' : 808,
'uni03C5' : 965,
'uni03C4' : 964,
'uni03C3' : 963,
'uni03C2' : 962,
'uni03C1' : 961,
'uni03C0' : 960,
'uni2010' : 8208,
'uni0130' : 304,
'uni0133' : 307,
'uni0132' : 306,
'uni0135' : 309,
'uni0134' : 308,
'uni0137' : 311,
'uni0136' : 310,
'uni0139' : 313,
'uni0138' : 312,
'uni2244' : 8772,
'uni229A' : 8858,
'uni2571' : 9585,
'uni0278' : 632,
'uni2239' : 8761,
'p' : 112,
'uni3019' : 12313,
'uni25CB' : 9675,
'uni03DB' : 987,
'uni03DC' : 988,
'uni03DA' : 986,
'uni03DF' : 991,
'uni03DD' : 989,
'uni013D' : 317,
'uni220A' : 8714,
'uni220C' : 8716,
'uni220B' : 8715,
'uni220E' : 8718,
'uni220D' : 8717,
'uni220F' : 8719,
'uni22CC' : 8908,
'Otilde' : 213,
'uni25E5' : 9701,
'uni2736' : 10038,
'perthousand' : 8240,
'zero' : 48,
'uni279B' : 10139,
'dotlessi' : 305,
'uni2279' : 8825,
'Scaron' : 352,
'zcaron' : 382,
'uni21D8' : 8664,
'egrave' : 232,
'uni0271' : 625,
'uni01AA' : 426,
'uni2332' : 9010,
'section' : 167,
'uni25E4' : 9700,
'Icircumflex' : 206,
'ntilde' : 241,
'uni041E' : 1054,
'ampersand' : 38,
'uni041C' : 1052,
'uni041A' : 1050,
'uni22AB' : 8875,
'uni21DB' : 8667,
'dotaccent' : 729,
'uni0416' : 1046,
'uni0417' : 1047,
'uni0414' : 1044,
'uni0415' : 1045,
'uni0412' : 1042,
'uni0413' : 1043,
'degree' : 176,
'uni0411' : 1041,
'K' : 75,
'uni25EB' : 9707,
'uni25EF' : 9711,
'uni0418' : 1048,
'uni0419' : 1049,
'uni2263' : 8803,
'uni226E' : 8814,
'uni2251' : 8785,
'uni02C8' : 712,
'uni2262' : 8802,
'acircumflex' : 226,
'uni22B3' : 8883,
'uni2261' : 8801,
'uni2394' : 9108,
'Aring' : 197,
'uni2260' : 8800,
'uni2254' : 8788,
'uni0436' : 1078,
'uni2267' : 8807,
'k' : 107,
'uni22C8' : 8904,
'uni226A' : 8810,
'uni231F' : 8991,
'smalltilde' : 732,
'uni2201' : 8705,
'uni2200' : 8704,
'uni2203' : 8707,
'uni02BD' : 701,
'uni2205' : 8709,
'uni2204' : 8708,
'Agrave' : 192,
'uni2206' : 8710,
'uni2209' : 8713,
'uni2208' : 8712,
'uni226D' : 8813,
'uni2264' : 8804,
'uni263D' : 9789,
'uni2258' : 8792,
'uni02D3' : 723,
'uni02D2' : 722,
'uni02D1' : 721,
'uni02D0' : 720,
'uni25E1' : 9697,
'divide' : 247,
'uni02D5' : 725,
'uni02D4' : 724,
'ocircumflex' : 244,
'uni2524' : 9508,
'uni043A' : 1082,
'uni24CC' : 9420,
'asciitilde' : 126,
'uni22B9' : 8889,
'uni24D2' : 9426,
'uni211E' : 8478,
'uni211D' : 8477,
'uni24DD' : 9437,
'uni211A' : 8474,
'uni211C' : 8476,
'uni211B' : 8475,
'uni25C6' : 9670,
'uni017F' : 383,
'uni017A' : 378,
'uni017C' : 380,
'uni017B' : 379,
'uni0346' : 838,
'uni22F1' : 8945,
'uni22F0' : 8944,
'two' : 50,
'uni2298' : 8856,
'uni24D1' : 9425,
'E' : 69,
'uni025D' : 605,
'scaron' : 353,
'uni2322' : 8994,
'uni25E3' : 9699,
'uni22BF' : 8895,
'F' : 70,
'uni0440' : 1088,
'uni255E' : 9566,
'uni22BA' : 8890,
'uni0175' : 373,
'uni0174' : 372,
'uni0177' : 375,
'uni0176' : 374,
'bracketleft' : 91,
'uni0170' : 368,
'uni0173' : 371,
'uni0172' : 370,
'asciicircum' : 94,
'uni0179' : 377,
'uni2590' : 9616,
'uni25E2' : 9698,
'uni2119' : 8473,
'uni2118' : 8472,
'uni25CC' : 9676,
'f' : 102,
'ordmasculine' : 186,
'uni229B' : 8859,
'uni22A1' : 8865,
'uni2111' : 8465,
'uni2110' : 8464,
'uni2113' : 8467,
'uni2112' : 8466,
'mu' : 181,
'uni2281' : 8833,
'paragraph' : 182,
'nine' : 57,
'uni25EC' : 9708,
'v' : 118,
'uni040C' : 1036,
'uni0113' : 275,
'uni22D0' : 8912,
'uni21CC' : 8652,
'uni21CB' : 8651,
'uni21CA' : 8650,
'uni22A5' : 8869,
'uni21CF' : 8655,
'uni21CE' : 8654,
'uni21CD' : 8653,
'guilsinglleft' : 8249,
'backslash' : 92,
'uni2284' : 8836,
'uni224E' : 8782,
'uni224D' : 8781,
'uni224F' : 8783,
'uni224A' : 8778,
'uni2287' : 8839,
'uni224C' : 8780,
'uni224B' : 8779,
'uni21BD' : 8637,
'uni2286' : 8838,
'uni030F' : 783,
'uni030D' : 781,
'uni030E' : 782,
'uni030B' : 779,
'uni030C' : 780,
'uni030A' : 778,
'uni026E' : 622,
'uni026D' : 621,
'six' : 54,
'uni026A' : 618,
'uni026C' : 620,
'uni25C1' : 9665,
'uni20D6' : 8406,
'uni045B' : 1115,
'uni045C' : 1116,
'uni256B' : 9579,
'uni045A' : 1114,
'uni045F' : 1119,
'uni045E' : 1118,
'A' : 65,
'uni2569' : 9577,
'uni0458' : 1112,
'uni0459' : 1113,
'uni0452' : 1106,
'uni0453' : 1107,
'uni2562' : 9570,
'uni0451' : 1105,
'uni0456' : 1110,
'uni0457' : 1111,
'uni0454' : 1108,
'uni0455' : 1109,
'icircumflex' : 238,
'uni0307' : 775,
'uni0304' : 772,
'uni0305' : 773,
'uni0269' : 617,
'uni0268' : 616,
'uni0300' : 768,
'uni0301' : 769,
'uni0265' : 613,
'uni0264' : 612,
'uni0267' : 615,
'uni0266' : 614,
'uni0261' : 609,
'uni0260' : 608,
'uni0263' : 611,
'uni0262' : 610,
'a' : 97,
'uni2207' : 8711,
'uni2247' : 8775,
'uni2246' : 8774,
'uni2241' : 8769,
'uni2240' : 8768,
'uni2243' : 8771,
'uni2242' : 8770,
'uni2312' : 8978,
'ogonek' : 731,
'uni2249' : 8777,
'uni2248' : 8776,
'uni3030' : 12336,
'q' : 113,
'uni21C2' : 8642,
'uni21C1' : 8641,
'uni21C0' : 8640,
'uni21C7' : 8647,
'uni21C6' : 8646,
'uni21C5' : 8645,
'uni21C4' : 8644,
'uni225F' : 8799,
'uni212C' : 8492,
'uni21C8' : 8648,
'uni2467' : 9319,
'oacute' : 243,
'uni028F' : 655,
'uni028E' : 654,
'uni026F' : 623,
'uni028C' : 652,
'uni028B' : 651,
'uni028A' : 650,
'uni2510' : 9488,
'ograve' : 242,
'edieresis' : 235,
'uni22CE' : 8910,
'uni22CF' : 8911,
'uni219F' : 8607,
'comma' : 44,
'uni22CA' : 8906,
'uni0429' : 1065,
'uni03C6' : 966,
'uni0427' : 1063,
'uni0426' : 1062,
'uni0425' : 1061,
'uni0424' : 1060,
'uni0423' : 1059,
'uni0422' : 1058,
'uni0421' : 1057,
'uni0420' : 1056,
'uni2465' : 9317,
'uni24D0' : 9424,
'uni2464' : 9316,
'uni0430' : 1072,
'otilde' : 245,
'uni2661' : 9825,
'uni24D6' : 9430,
'uni2466' : 9318,
'uni24D5' : 9429,
'uni219A' : 8602,
'uni2518' : 9496,
'uni22B6' : 8886,
'uni2461' : 9313,
'uni24D4' : 9428,
'uni2460' : 9312,
'uni24EA' : 9450,
'guillemotright' : 187,
'ecircumflex' : 234,
'greater' : 62,
'uni2011' : 8209,
'uacute' : 250,
'uni2462' : 9314,
'L' : 76,
'bullet' : 8226,
'uni02A4' : 676,
'uni02A7' : 679,
'cedilla' : 184,
'uni02A2' : 674,
'uni2015' : 8213,
'uni22C4' : 8900,
'uni22C5' : 8901,
'uni22AD' : 8877,
'uni22C7' : 8903,
'uni22C0' : 8896,
'uni2016' : 8214,
'uni22C2' : 8898,
'uni22C3' : 8899,
'uni24CF' : 9423,
'uni042F' : 1071,
'uni042E' : 1070,
'uni042D' : 1069,
'ydieresis' : 255,
'l' : 108,
'logicalnot' : 172,
'uni24CA' : 9418,
'uni0287' : 647,
'uni0286' : 646,
'uni0285' : 645,
'uni0284' : 644,
'uni0283' : 643,
'uni0282' : 642,
'uni0281' : 641,
'uni027C' : 636,
'uni2664' : 9828,
'exclamdown' : 161,
'uni25C4' : 9668,
'uni0289' : 649,
'uni0288' : 648,
'uni039A' : 922,
'endash' : 8211,
'uni2640' : 9792,
'uni20E4' : 8420,
'uni0473' : 1139,
'uni20E1' : 8417,
'uni2642' : 9794,
'uni03B8' : 952,
'uni03B9' : 953,
'agrave' : 224,
'uni03B4' : 948,
'uni03B5' : 949,
'uni03B6' : 950,
'uni03B7' : 951,
'uni03B0' : 944,
'uni03B1' : 945,
'uni03B2' : 946,
'uni03B3' : 947,
'uni2555' : 9557,
'Adieresis' : 196,
'germandbls' : 223,
'Odieresis' : 214,
'space' : 32,
'uni0126' : 294,
'uni0127' : 295,
'uni0124' : 292,
'uni0125' : 293,
'uni0122' : 290,
'uni0123' : 291,
'uni0120' : 288,
'uni0121' : 289,
'quoteright' : 8217,
'uni2560' : 9568,
'uni2556' : 9558,
'ucircumflex' : 251,
'uni2561' : 9569,
'uni2551' : 9553,
'uni25B2' : 9650,
'uni2550' : 9552,
'uni2563' : 9571,
'uni2553' : 9555,
'G' : 71,
'uni2564' : 9572,
'uni2552' : 9554,
'quoteleft' : 8216,
'uni2565' : 9573,
'uni2572' : 9586,
'uni2568' : 9576,
'uni2566' : 9574,
'W' : 87,
'uni214A' : 8522,
'uni012F' : 303,
'uni012D' : 301,
'uni012E' : 302,
'uni012B' : 299,
'uni012C' : 300,
'uni255C' : 9564,
'uni012A' : 298,
'uni2289' : 8841,
'Q' : 81,
'uni2320' : 8992,
'uni2321' : 8993,
'g' : 103,
'uni03BD' : 957,
'uni03BE' : 958,
'uni03BF' : 959,
'uni2282' : 8834,
'uni2285' : 8837,
'uni03BA' : 954,
'uni03BB' : 955,
'uni03BC' : 956,
'uni2128' : 8488,
'uni25B7' : 9655,
'w' : 119,
'uni0302' : 770,
'uni03DE' : 990,
'uni25DA' : 9690,
'uni0303' : 771,
'uni0463' : 1123,
'uni0462' : 1122,
'uni3018' : 12312,
'uni2514' : 9492,
'question' : 63,
'uni25B3' : 9651,
'uni24E1' : 9441,
'one' : 49,
'uni200A' : 8202,
'uni2278' : 8824,
'ring' : 730,
'uni0195' : 405,
'figuredash' : 8210,
'uni22EC' : 8940,
'uni0339' : 825,
'uni0338' : 824,
'uni0337' : 823,
'uni0336' : 822,
'uni0335' : 821,
'uni0333' : 819,
'uni0332' : 818,
'uni0331' : 817,
'uni0330' : 816,
'uni01C1' : 449,
'uni01C0' : 448,
'uni01C3' : 451,
'uni01C2' : 450,
'uni2353' : 9043,
'uni0308' : 776,
'uni2218' : 8728,
'uni2219' : 8729,
'uni2216' : 8726,
'uni2217' : 8727,
'uni2214' : 8724,
'uni0309' : 777,
'uni2609' : 9737,
'uni2213' : 8723,
'uni2210' : 8720,
'uni2211' : 8721,
'uni2245' : 8773,
'B' : 66,
'uni25D6' : 9686,
'iacute' : 237,
'uni02E6' : 742,
'uni02E7' : 743,
'uni02E8' : 744,
'uni02E9' : 745,
'uni221D' : 8733,
'uni221E' : 8734,
'Ydieresis' : 376,
'uni221C' : 8732,
'uni22D7' : 8919,
'uni221A' : 8730,
'R' : 82,
'uni24DC' : 9436,
'uni033F' : 831,
'uni033E' : 830,
'uni033C' : 828,
'uni033B' : 827,
'uni033A' : 826,
'b' : 98,
'uni228A' : 8842,
'uni22DB' : 8923,
'uni2554' : 9556,
'uni046B' : 1131,
'uni046A' : 1130,
'r' : 114,
'uni24DB' : 9435,
'Ccedilla' : 199,
'minus' : 8722,
'uni24DA' : 9434,
'uni03F0' : 1008,
'uni03F1' : 1009,
'uni20AC' : 8364,
'uni2276' : 8822,
'uni24C0' : 9408,
'uni0162' : 354,
'uni0163' : 355,
'uni011E' : 286,
'uni011D' : 285,
'uni011C' : 284,
'uni011B' : 283,
'uni0164' : 356,
'uni0165' : 357,
'Lslash' : 321,
'uni0168' : 360,
'uni0169' : 361,
'uni25C9' : 9673,
'uni02E5' : 741,
'uni21C3' : 8643,
'uni24C4' : 9412,
'uni24E2' : 9442,
'uni2277' : 8823,
'uni013A' : 314,
'uni2102' : 8450,
'Uacute' : 218,
'uni2317' : 8983,
'uni2107' : 8455,
'uni221F' : 8735,
'yacute' : 253,
'uni3012' : 12306,
'Ucircumflex' : 219,
'uni015D' : 349,
'quotedbl' : 34,
'uni25D9' : 9689,
'uni2280' : 8832,
'uni22AF' : 8879,
'onehalf' : 189,
'uni221B' : 8731,
'Thorn' : 222,
'uni2226' : 8742,
'M' : 77,
'uni25BA' : 9658,
'uni2463' : 9315,
'uni2336' : 9014,
'eight' : 56,
'uni2236' : 8758,
'multiply' : 215,
'uni210C' : 8460,
'uni210A' : 8458,
'uni21C9' : 8649,
'grave' : 96,
'uni210E' : 8462,
'uni0117' : 279,
'uni016C' : 364,
'uni0115' : 277,
'uni016A' : 362,
'uni016F' : 367,
'uni0112' : 274,
'uni016D' : 365,
'uni016E' : 366,
'Ocircumflex' : 212,
'uni2305' : 8965,
'm' : 109,
'uni24DF' : 9439,
'uni0119' : 281,
'uni0118' : 280,
'uni20A3' : 8355,
'uni20A4' : 8356,
'uni20A7' : 8359,
'uni2288' : 8840,
'uni24C3' : 9411,
'uni251C' : 9500,
'uni228D' : 8845,
'uni222F' : 8751,
'uni222E' : 8750,
'uni222D' : 8749,
'uni222C' : 8748,
'uni222B' : 8747,
'uni222A' : 8746,
'uni255B' : 9563,
'Ugrave' : 217,
'uni24DE' : 9438,
'guilsinglright' : 8250,
'uni250A' : 9482,
'Ntilde' : 209,
'uni0279' : 633,
'questiondown' : 191,
'uni256C' : 9580,
'Atilde' : 195,
'uni0272' : 626,
'uni0273' : 627,
'uni0270' : 624,
'ccedilla' : 231,
'uni0276' : 630,
'uni0277' : 631,
'uni0274' : 628,
'uni0275' : 629,
'uni2252' : 8786,
'uni041F' : 1055,
'uni2250' : 8784,
'Z' : 90,
'uni2256' : 8790,
'uni2257' : 8791,
'copyright' : 169,
'uni2255' : 8789,
'uni043D' : 1085,
'uni043E' : 1086,
'uni043F' : 1087,
'yen' : 165,
'uni041D' : 1053,
'uni043B' : 1083,
'uni043C' : 1084,
'uni21B0' : 8624,
'uni21B1' : 8625,
'uni21B2' : 8626,
'uni21B3' : 8627,
'uni21B4' : 8628,
'uni21B5' : 8629,
'uni21B6' : 8630,
'uni21B7' : 8631,
'uni21B8' : 8632,
'Eacute' : 201,
'uni2311' : 8977,
'uni2310' : 8976,
'uni228F' : 8847,
'uni25DB' : 9691,
'uni21BA' : 8634,
'uni21BB' : 8635,
'uni21BC' : 8636,
'uni2017' : 8215,
'uni21BE' : 8638,
'uni21BF' : 8639,
'uni231C' : 8988,
'H' : 72,
'uni0293' : 659,
'uni2202' : 8706,
'uni22A4' : 8868,
'uni231E' : 8990,
'uni2232' : 8754,
'uni225B' : 8795,
'uni225C' : 8796,
'uni24D9' : 9433,
'uni225A' : 8794,
'uni0438' : 1080,
'uni0439' : 1081,
'uni225D' : 8797,
'uni225E' : 8798,
'uni0434' : 1076,
'X' : 88,
'uni007F' : 127,
'uni0437' : 1079,
'Idieresis' : 207,
'uni0431' : 1073,
'uni0432' : 1074,
'uni0433' : 1075,
'uni22AC' : 8876,
'uni22CD' : 8909,
'uni25A3' : 9635,
'bar' : 124,
'uni24BB' : 9403,
'uni037E' : 894,
'uni027B' : 635,
'h' : 104,
'uni027A' : 634,
'uni027F' : 639,
'uni027D' : 637,
'uni027E' : 638,
'uni2227' : 8743,
'uni2004' : 8196,
'uni2225' : 8741,
'uni2224' : 8740,
'uni2223' : 8739,
'uni2222' : 8738,
'uni2221' : 8737,
'uni2220' : 8736,
'x' : 120,
'uni2323' : 8995,
'uni2559' : 9561,
'uni2558' : 9560,
'uni2229' : 8745,
'uni2228' : 8744,
'udieresis' : 252,
'uni029D' : 669,
'ordfeminine' : 170,
'uni22CB' : 8907,
'uni233D' : 9021,
'uni0428' : 1064,
'uni24C6' : 9414,
'uni22DD' : 8925,
'uni24C7' : 9415,
'uni015C' : 348,
'uni015B' : 347,
'uni015A' : 346,
'uni22AA' : 8874,
'uni015F' : 351,
'uni015E' : 350,
'braceleft' : 123,
'uni24C5' : 9413,
'uni0410' : 1040,
'uni03AA' : 938,
'uni24C2' : 9410,
'uni03AC' : 940,
'uni03AB' : 939,
'macron' : 175,
'uni03AD' : 941,
'uni03AF' : 943,
'uni0294' : 660,
'uni0295' : 661,
'uni0296' : 662,
'uni0297' : 663,
'uni0290' : 656,
'uni0291' : 657,
'uni0292' : 658,
'atilde' : 227,
'Acircumflex' : 194,
'uni2370' : 9072,
'uni24C1' : 9409,
'uni0298' : 664,
'uni0299' : 665,
'Oslash' : 216,
'uni029E' : 670,
'C' : 67,
'quotedblleft' : 8220,
'uni029B' : 667,
'uni029C' : 668,
'uni03A9' : 937,
'uni03A8' : 936,
'S' : 83,
'uni24C9' : 9417,
'uni03A1' : 929,
'uni03A0' : 928,
'exclam' : 33,
'uni03A5' : 933,
'uni03A4' : 932,
'uni03A7' : 935,
'Zcaron' : 381,
'uni2133' : 8499,
'uni2132' : 8498,
'uni0159' : 345,
'uni0158' : 344,
'uni2137' : 8503,
'uni2005' : 8197,
'uni2135' : 8501,
'uni2134' : 8500,
'uni02BA' : 698,
'uni2033' : 8243,
'uni0151' : 337,
'uni0150' : 336,
'uni0157' : 343,
'equal' : 61,
'uni0155' : 341,
'uni0154' : 340,
's' : 115,
'uni233F' : 9023,
'eth' : 240,
'uni24BE' : 9406,
'uni21E9' : 8681,
'uni2060' : 8288,
'Egrave' : 200,
'uni255D' : 9565,
'uni24CD' : 9421,
'uni21E1' : 8673,
'uni21B9' : 8633,
'hyphen' : 45,
'uni01BE' : 446,
'uni01BB' : 443,
'period' : 46,
'igrave' : 236,
'uni01BA' : 442,
'uni2296' : 8854,
'uni2297' : 8855,
'uni2294' : 8852,
'uni2295' : 8853,
'colon' : 58,
'uni2293' : 8851,
'uni2290' : 8848,
'uni2291' : 8849,
'uni032D' : 813,
'uni032E' : 814,
'uni032F' : 815,
'uni032A' : 810,
'uni032B' : 811,
'uni032C' : 812,
'uni231D' : 8989,
'Ecircumflex' : 202,
'uni24D7' : 9431,
'uni25DD' : 9693,
'trademark' : 8482,
'Aacute' : 193,
'cent' : 162,
'uni0445' : 1093,
'uni266E' : 9838,
'uni266D' : 9837,
'uni266B' : 9835,
'uni03C9' : 969,
'uni2003' : 8195,
'uni2047' : 8263,
'lslash' : 322,
'uni03A6' : 934,
'uni2043' : 8259,
'uni250C' : 9484,
'uni2040' : 8256,
'uni255F' : 9567,
'uni24CB' : 9419,
'uni0472' : 1138,
'uni0446' : 1094,
'uni0474' : 1140,
'uni0475' : 1141,
'uni2508' : 9480,
'uni2660' : 9824,
'uni2506' : 9478,
'uni2502' : 9474,
'c' : 99,
'uni2500' : 9472,
'N' : 78,
'uni22A6' : 8870,
'uni21E7' : 8679,
'uni2130' : 8496,
'uni2002' : 8194,
'breve' : 728,
'uni0442' : 1090,
'Oacute' : 211,
'uni229F' : 8863,
'uni25C7' : 9671,
'uni229D' : 8861,
'uni229E' : 8862,
'guillemotleft' : 171,
'uni0329' : 809,
'uni24E5' : 9445,
'uni011F' : 287,
'uni0324' : 804,
'uni0325' : 805,
'uni0326' : 806,
'uni0327' : 807,
'uni0321' : 801,
'uni0322' : 802,
'n' : 110,
'uni2032' : 8242,
'uni2269' : 8809,
'uni2268' : 8808,
'uni0306' : 774,
'uni226B' : 8811,
'uni21EA' : 8682,
'uni0166' : 358,
'uni203B' : 8251,
'uni01B5' : 437,
'idieresis' : 239,
'uni02BC' : 700,
'uni01B0' : 432,
'braceright' : 125,
'seven' : 55,
'uni02BB' : 699,
'uni011A' : 282,
'uni29FB' : 10747,
'brokenbar' : 166,
'uni2036' : 8246,
'uni25C0' : 9664,
'uni0156' : 342,
'uni22D5' : 8917,
'uni0258' : 600,
'ugrave' : 249,
'uni22D6' : 8918,
'uni22D1' : 8913,
'uni2034' : 8244,
'uni22D3' : 8915,
'uni22D2' : 8914,
'uni203C' : 8252,
'uni223E' : 8766,
'uni02BF' : 703,
'uni22D9' : 8921,
'uni22D8' : 8920,
'uni25BD' : 9661,
'uni25BE' : 9662,
'uni25BF' : 9663,
'uni041B' : 1051,
'periodcentered' : 183,
'uni25BC' : 9660,
'uni019E' : 414,
'uni019B' : 411,
'uni019A' : 410,
'uni2007' : 8199,
'uni0391' : 913,
'uni0390' : 912,
'uni0393' : 915,
'uni0392' : 914,
'uni0395' : 917,
'uni0394' : 916,
'uni0397' : 919,
'uni0396' : 918,
'uni0399' : 921,
'uni0398' : 920,
'uni25C8' : 9672,
'uni2468' : 9320,
'sterling' : 163,
'uni22EB' : 8939,
'uni039C' : 924,
'uni039B' : 923,
'uni039E' : 926,
'uni039D' : 925,
'uni039F' : 927,
'I' : 73,
'uni03E1' : 993,
'uni03E0' : 992,
'uni2319' : 8985,
'uni228B' : 8843,
'uni25B5' : 9653,
'uni25B6' : 9654,
'uni22EA' : 8938,
'uni24B9' : 9401,
'uni044E' : 1102,
'uni0199' : 409,
'uni2266' : 8806,
'Y' : 89,
'uni22A2' : 8866,
'Eth' : 208,
'uni266F' : 9839,
'emdash' : 8212,
'uni263B' : 9787,
'uni24BD' : 9405,
'uni22DE' : 8926,
'uni0360' : 864,
'uni2557' : 9559,
'uni22DF' : 8927,
'uni22DA' : 8922,
'uni22DC' : 8924,
'uni0361' : 865,
'i' : 105,
'uni24BF' : 9407,
'uni0362' : 866,
'uni263E' : 9790,
'uni028D' : 653,
'uni2259' : 8793,
'uni0323' : 803,
'uni2265' : 8805,
'daggerdbl' : 8225,
'y' : 121,
'uni010A' : 266,
'plusminus' : 177,
'less' : 60,
'uni21AE' : 8622,
'uni0315' : 789,
'uni230B' : 8971,
'uni21AF' : 8623,
'uni21AA' : 8618,
'uni21AC' : 8620,
'uni21AB' : 8619,
'uni01FB' : 507,
'uni01FC' : 508,
'uni223A' : 8762,
'uni01FA' : 506,
'uni01FF' : 511,
'uni01FD' : 509,
'uni01FE' : 510,
'uni2567' : 9575,
'uni25E0' : 9696,
'uni0104' : 260,
'uni0105' : 261,
'uni0106' : 262,
'uni0107' : 263,
'uni0100' : 256,
'uni0101' : 257,
'uni0102' : 258,
'uni0103' : 259,
'uni2038' : 8248,
'uni2009' : 8201,
'uni2008' : 8200,
'uni0108' : 264,
'uni0109' : 265,
'uni02A1' : 673,
'uni223B' : 8763,
'uni226C' : 8812,
'uni25AC' : 9644,
'uni24D3' : 9427,
'uni21E0' : 8672,
'uni21E3' : 8675,
'Udieresis' : 220,
'uni21E2' : 8674,
'D' : 68,
'uni21E5' : 8677,
'uni2621' : 9761,
'uni21D1' : 8657,
'uni203E' : 8254,
'uni22C6' : 8902,
'uni21E4' : 8676,
'uni010D' : 269,
'uni010E' : 270,
'uni010F' : 271,
'five' : 53,
'T' : 84,
'uni010B' : 267,
'uni010C' : 268,
'uni2605' : 9733,
'uni2663' : 9827,
'uni21E6' : 8678,
'uni24B6' : 9398,
'uni22C1' : 8897,
'oslash' : 248,
'acute' : 180,
'uni01F0' : 496,
'd' : 100,
'OE' : 338,
'uni22E3' : 8931,
'Igrave' : 204,
'uni2308' : 8968,
'uni2309' : 8969,
'uni21A9' : 8617,
't' : 116,
'uni2313' : 8979,
'uni03A3' : 931,
'uni21A4' : 8612,
'uni21A7' : 8615,
'uni21A6' : 8614,
'uni21A1' : 8609,
'uni21A0' : 8608,
'uni21A3' : 8611,
'uni21A2' : 8610,
'parenright' : 41,
'uni256A' : 9578,
'uni25DC' : 9692,
'uni24CE' : 9422,
'uni042C' : 1068,
'uni24E0' : 9440,
'uni042B' : 1067,
'uni0409' : 1033,
'uni0408' : 1032,
'uni24E7' : 9447,
'uni25B4' : 9652,
'uni042A' : 1066,
'uni228E' : 8846,
'uni0401' : 1025,
'adieresis' : 228,
'uni0403' : 1027,
'quotesingle' : 39,
'uni0405' : 1029,
'uni0404' : 1028,
'uni0407' : 1031,
'uni0406' : 1030,
'uni229C' : 8860,
'uni2306' : 8966,
'uni2253' : 8787,
'twodotenleader' : 8229,
'uni2131' : 8497,
'uni21DA' : 8666,
'uni2234' : 8756,
'uni2235' : 8757,
'uni01A5' : 421,
'uni2237' : 8759,
'uni2230' : 8752,
'uni02CC' : 716,
'slash' : 47,
'uni01A0' : 416,
'ellipsis' : 8230,
'uni2299' : 8857,
'uni2238' : 8760,
'numbersign' : 35,
'uni21A8' : 8616,
'uni223D' : 8765,
'uni01AF' : 431,
'uni223F' : 8767,
'uni01AD' : 429,
'uni01AB' : 427,
'odieresis' : 246,
'uni223C' : 8764,
'uni227D' : 8829,
'uni0280' : 640,
'O' : 79,
'uni227E' : 8830,
'uni21A5' : 8613,
'uni22D4' : 8916,
'uni25D4' : 9684,
'uni227F' : 8831,
'uni0435' : 1077,
'uni2302' : 8962,
'uni2669' : 9833,
'uni24E3' : 9443,
'uni2720' : 10016,
'uni22A8' : 8872,
'uni22A9' : 8873,
'uni040A' : 1034,
'uni22A7' : 8871,
'oe' : 339,
'uni040B' : 1035,
'uni040E' : 1038,
'uni22A3' : 8867,
'o' : 111,
'uni040F' : 1039,
'Edieresis' : 203,
'uni25D5' : 9685,
'plus' : 43,
'uni044D' : 1101,
'uni263C' : 9788,
'uni22E6' : 8934,
'uni2283' : 8835,
'uni258C' : 9612,
'uni219E' : 8606,
'uni24E4' : 9444,
'uni2136' : 8502,
'dagger' : 8224,
'uni24B7' : 9399,
'uni219B' : 8603,
'uni22E5' : 8933,
'three' : 51,
'uni210B' : 8459,
'uni2534' : 9524,
'uni24B8' : 9400,
'uni230A' : 8970,
'hungarumlaut' : 733,
'parenleft' : 40,
'uni0148' : 328,
'uni0149' : 329,
'uni2124' : 8484,
'uni2125' : 8485,
'uni2126' : 8486,
'uni2127' : 8487,
'uni0140' : 320,
'uni2129' : 8489,
'uni25C5' : 9669,
'uni0143' : 323,
'uni0144' : 324,
'uni0145' : 325,
'uni0146' : 326,
'uni0147' : 327,
'uni210D' : 8461,
'fraction' : 8260,
'uni2031' : 8241,
'uni2196' : 8598,
'uni2035' : 8245,
'uni24E6' : 9446,
'uni016B' : 363,
'uni24BA' : 9402,
'uni266A' : 9834,
'uni0116' : 278,
'uni2115' : 8469,
'registered' : 174,
'J' : 74,
'uni25DF' : 9695,
'uni25CE' : 9678,
'uni273D' : 10045,
'dieresis' : 168,
'uni212B' : 8491,
'uni0114' : 276,
'uni212D' : 8493,
'uni212E' : 8494,
'uni212F' : 8495,
'uni014A' : 330,
'uni014B' : 331,
'uni014C' : 332,
'uni014D' : 333,
'uni014E' : 334,
'uni014F' : 335,
'uni025E' : 606,
'uni24E8' : 9448,
'uni0111' : 273,
'uni24E9' : 9449,
'Ograve' : 210,
'j' : 106,
'uni2195' : 8597,
'uni2194' : 8596,
'uni2197' : 8599,
'uni2037' : 8247,
'uni2191' : 8593,
'uni2190' : 8592,
'uni2193' : 8595,
'uni2192' : 8594,
'uni29FA' : 10746,
'uni2713' : 10003,
'z' : 122,
'uni2199' : 8601,
'uni2198' : 8600,
'uni2667' : 9831,
'ae' : 230,
'uni0448' : 1096,
'semicolon' : 59,
'uni2666' : 9830,
'uni038F' : 911,
'uni0444' : 1092,
'uni0447' : 1095,
'uni038E' : 910,
'uni0441' : 1089,
'uni038C' : 908,
'uni0443' : 1091,
'uni038A' : 906,
'uni0250' : 592,
'uni0251' : 593,
'uni0252' : 594,
'uni0253' : 595,
'uni0254' : 596,
'at' : 64,
'uni0256' : 598,
'uni0257' : 599,
'uni0167' : 359,
'uni0259' : 601,
'uni228C' : 8844,
'uni2662' : 9826,
'uni0319' : 793,
'uni0318' : 792,
'uni24BC' : 9404,
'uni0402' : 1026,
'uni22EF' : 8943,
'Iacute' : 205,
'uni22ED' : 8941,
'uni22EE' : 8942,
'uni0311' : 785,
'uni0310' : 784,
'uni21E8' : 8680,
'uni0312' : 786,
'percent' : 37,
'uni0317' : 791,
'uni0316' : 790,
'uni21D6' : 8662,
'uni21D7' : 8663,
'uni21D4' : 8660,
'uni21D5' : 8661,
'uni21D2' : 8658,
'uni21D3' : 8659,
'uni21D0' : 8656,
'uni2138' : 8504,
'uni2270' : 8816,
'uni2271' : 8817,
'uni2272' : 8818,
'uni2273' : 8819,
'uni2274' : 8820,
'uni2275' : 8821,
'bracketright' : 93,
'uni21D9' : 8665,
'uni21DF' : 8671,
'uni21DD' : 8669,
'uni21DE' : 8670,
'AE' : 198,
'uni03AE' : 942,
'uni227A' : 8826,
'uni227B' : 8827,
'uni227C' : 8828,
'asterisk' : 42,
'aacute' : 225,
'uni226F' : 8815,
'uni22E2' : 8930,
'uni0386' : 902,
'uni22E0' : 8928,
'uni22E1' : 8929,
'U' : 85,
'uni22E7' : 8935,
'uni22E4' : 8932,
'uni0387' : 903,
'uni031A' : 794,
'eacute' : 233,
'uni22E8' : 8936,
'uni22E9' : 8937,
'uni24D8' : 9432,
'uni025A' : 602,
'uni025B' : 603,
'uni025C' : 604,
'e' : 101,
'uni0128' : 296,
'uni025F' : 607,
'uni2665' : 9829,
'thorn' : 254,
'uni0129' : 297,
'uni253C' : 9532,
'uni25D7' : 9687,
'u' : 117,
'uni0388' : 904,
'uni0389' : 905,
'uni0255' : 597,
'uni0171' : 369,
'uni0384' : 900,
'uni0385' : 901,
'uni044A' : 1098,
'uni252C' : 9516,
'uni044C' : 1100,
'uni044B' : 1099
}
# Inverse mapping of type12uni: Unicode code point -> Type 1 glyph name.
# A dict comprehension replaces the dated dict([(v, k) ...]) idiom: same
# result, no throwaway list of tuples (flake8-comprehensions C404).
# Note: when several glyph names share a code point, the entry iterated
# last in type12uni wins, exactly as with the original construction.
uni2type1 = {codepoint: name for name, codepoint in type12uni.items()}
tex2uni = {
'widehat' : 0x0302,
'widetilde' : 0x0303,
'widebar' : 0x0305,
'langle' : 0x27e8,
'rangle' : 0x27e9,
'perp' : 0x27c2,
'neq' : 0x2260,
'Join' : 0x2a1d,
'leqslant' : 0x2a7d,
'geqslant' : 0x2a7e,
'lessapprox' : 0x2a85,
'gtrapprox' : 0x2a86,
'lesseqqgtr' : 0x2a8b,
'gtreqqless' : 0x2a8c,
'triangleeq' : 0x225c,
'eqslantless' : 0x2a95,
'eqslantgtr' : 0x2a96,
'backepsilon' : 0x03f6,
'precapprox' : 0x2ab7,
'succapprox' : 0x2ab8,
'fallingdotseq' : 0x2252,
'subseteqq' : 0x2ac5,
'supseteqq' : 0x2ac6,
'varpropto' : 0x221d,
'precnapprox' : 0x2ab9,
'succnapprox' : 0x2aba,
'subsetneqq' : 0x2acb,
'supsetneqq' : 0x2acc,
'lnapprox' : 0x2ab9,
'gnapprox' : 0x2aba,
'longleftarrow' : 0x27f5,
'longrightarrow' : 0x27f6,
'longleftrightarrow' : 0x27f7,
'Longleftarrow' : 0x27f8,
'Longrightarrow' : 0x27f9,
'Longleftrightarrow' : 0x27fa,
'longmapsto' : 0x27fc,
'leadsto' : 0x21dd,
'dashleftarrow' : 0x290e,
'dashrightarrow' : 0x290f,
'circlearrowleft' : 0x21ba,
'circlearrowright' : 0x21bb,
'leftrightsquigarrow' : 0x21ad,
'leftsquigarrow' : 0x219c,
'rightsquigarrow' : 0x219d,
'Game' : 0x2141,
'hbar' : 0x0127,
'hslash' : 0x210f,
'ldots' : 0x2026,
'vdots' : 0x22ee,
'doteqdot' : 0x2251,
'doteq' : 8784,
'partial' : 8706,
'gg' : 8811,
'asymp' : 8781,
'blacktriangledown' : 9662,
'otimes' : 8855,
'nearrow' : 8599,
'varpi' : 982,
'vee' : 8744,
'vec' : 8407,
'smile' : 8995,
'succnsim' : 8937,
'gimel' : 8503,
'vert' : 124,
'|' : 124,
'varrho' : 1009,
'P' : 182,
'approxident' : 8779,
'Swarrow' : 8665,
'textasciicircum' : 94,
'imageof' : 8887,
'ntriangleleft' : 8938,
'nleq' : 8816,
'div' : 247,
'nparallel' : 8742,
'Leftarrow' : 8656,
'lll' : 8920,
'oiint' : 8751,
'ngeq' : 8817,
'Theta' : 920,
'origof' : 8886,
'blacksquare' : 9632,
'solbar' : 9023,
'neg' : 172,
'sum' : 8721,
'Vdash' : 8873,
'coloneq' : 8788,
'degree' : 176,
'bowtie' : 8904,
'blacktriangleright' : 9654,
'varsigma' : 962,
'leq' : 8804,
'ggg' : 8921,
'lneqq' : 8808,
'scurel' : 8881,
'stareq' : 8795,
'BbbN' : 8469,
'nLeftarrow' : 8653,
'nLeftrightarrow' : 8654,
'k' : 808,
'bot' : 8869,
'BbbC' : 8450,
'Lsh' : 8624,
'leftleftarrows' : 8647,
'BbbZ' : 8484,
'digamma' : 989,
'BbbR' : 8477,
'BbbP' : 8473,
'BbbQ' : 8474,
'vartriangleright' : 8883,
'succsim' : 8831,
'wedge' : 8743,
'lessgtr' : 8822,
'veebar' : 8891,
'mapsdown' : 8615,
'Rsh' : 8625,
'chi' : 967,
'prec' : 8826,
'nsubseteq' : 8840,
'therefore' : 8756,
'eqcirc' : 8790,
'textexclamdown' : 161,
'nRightarrow' : 8655,
'flat' : 9837,
'notin' : 8713,
'llcorner' : 8990,
'varepsilon' : 949,
'bigtriangleup' : 9651,
'aleph' : 8501,
'dotminus' : 8760,
'upsilon' : 965,
'Lambda' : 923,
'cap' : 8745,
'barleftarrow' : 8676,
'mu' : 956,
'boxplus' : 8862,
'mp' : 8723,
'circledast' : 8859,
'tau' : 964,
'in' : 8712,
'backslash' : 92,
'varnothing' : 8709,
'sharp' : 9839,
'eqsim' : 8770,
'gnsim' : 8935,
'Searrow' : 8664,
'updownarrows' : 8645,
'heartsuit' : 9825,
'trianglelefteq' : 8884,
'ddag' : 8225,
'sqsubseteq' : 8849,
'mapsfrom' : 8612,
'boxbar' : 9707,
'sim' : 8764,
'Nwarrow' : 8662,
'nequiv' : 8802,
'succ' : 8827,
'vdash' : 8866,
'Leftrightarrow' : 8660,
'parallel' : 8741,
'invnot' : 8976,
'natural' : 9838,
'ss' : 223,
'uparrow' : 8593,
'nsim' : 8769,
'hookrightarrow' : 8618,
'Equiv' : 8803,
'approx' : 8776,
'Vvdash' : 8874,
'nsucc' : 8833,
'leftrightharpoons' : 8651,
'Re' : 8476,
'boxminus' : 8863,
'equiv' : 8801,
'Lleftarrow' : 8666,
'thinspace' : 8201,
'll' : 8810,
'Cup' : 8915,
'measeq' : 8798,
'upharpoonleft' : 8639,
'lq' : 8216,
'Upsilon' : 933,
'subsetneq' : 8842,
'greater' : 62,
'supsetneq' : 8843,
'Cap' : 8914,
'L' : 321,
'spadesuit' : 9824,
'lrcorner' : 8991,
'not' : 824,
'bar' : 772,
'rightharpoonaccent' : 8401,
'boxdot' : 8865,
'l' : 322,
'leftharpoondown' : 8637,
'bigcup' : 8899,
'iint' : 8748,
'bigwedge' : 8896,
'downharpoonleft' : 8643,
'textasciitilde' : 126,
'subset' : 8834,
'leqq' : 8806,
'mapsup' : 8613,
'nvDash' : 8877,
'looparrowleft' : 8619,
'nless' : 8814,
'rightarrowbar' : 8677,
'Vert' : 8214,
'downdownarrows' : 8650,
'uplus' : 8846,
'simeq' : 8771,
'napprox' : 8777,
'ast' : 8727,
'twoheaduparrow' : 8607,
'doublebarwedge' : 8966,
'Sigma' : 931,
'leftharpoonaccent' : 8400,
'ntrianglelefteq' : 8940,
'nexists' : 8708,
'times' : 215,
'measuredangle' : 8737,
'bumpeq' : 8783,
'carriagereturn' : 8629,
'adots' : 8944,
'checkmark' : 10003,
'lambda' : 955,
'xi' : 958,
'rbrace' : 125,
'rbrack' : 93,
'Nearrow' : 8663,
'maltese' : 10016,
'clubsuit' : 9827,
'top' : 8868,
'overarc' : 785,
'varphi' : 966,
'Delta' : 916,
'iota' : 953,
'nleftarrow' : 8602,
'candra' : 784,
'supset' : 8835,
'triangleleft' : 9665,
'gtreqless' : 8923,
'ntrianglerighteq' : 8941,
'quad' : 8195,
'Xi' : 926,
'gtrdot' : 8919,
'leftthreetimes' : 8907,
'minus' : 8722,
'preccurlyeq' : 8828,
'nleftrightarrow' : 8622,
'lambdabar' : 411,
'blacktriangle' : 9652,
'kernelcontraction' : 8763,
'Phi' : 934,
'angle' : 8736,
'spadesuitopen' : 9828,
'eqless' : 8924,
'mid' : 8739,
'varkappa' : 1008,
'Ldsh' : 8626,
'updownarrow' : 8597,
'beta' : 946,
'textquotedblleft' : 8220,
'rho' : 961,
'alpha' : 945,
'intercal' : 8890,
'beth' : 8502,
'grave' : 768,
'acwopencirclearrow' : 8634,
'nmid' : 8740,
'nsupset' : 8837,
'sigma' : 963,
'dot' : 775,
'Rightarrow' : 8658,
'turnednot' : 8985,
'backsimeq' : 8909,
'leftarrowtail' : 8610,
'approxeq' : 8778,
'curlyeqsucc' : 8927,
'rightarrowtail' : 8611,
'Psi' : 936,
'copyright' : 169,
'yen' : 165,
'vartriangleleft' : 8882,
'rasp' : 700,
'triangleright' : 9655,
'precsim' : 8830,
'infty' : 8734,
'geq' : 8805,
'updownarrowbar' : 8616,
'precnsim' : 8936,
'H' : 779,
'ulcorner' : 8988,
'looparrowright' : 8620,
'ncong' : 8775,
'downarrow' : 8595,
'circeq' : 8791,
'subseteq' : 8838,
'bigstar' : 9733,
'prime' : 8242,
'lceil' : 8968,
'Rrightarrow' : 8667,
'oiiint' : 8752,
'curlywedge' : 8911,
'vDash' : 8872,
'lfloor' : 8970,
'ddots' : 8945,
'exists' : 8707,
'underbar' : 817,
'Pi' : 928,
'leftrightarrows' : 8646,
'sphericalangle' : 8738,
'coprod' : 8720,
'circledcirc' : 8858,
'gtrsim' : 8819,
'gneqq' : 8809,
'between' : 8812,
'theta' : 952,
'complement' : 8705,
'arceq' : 8792,
'nVdash' : 8878,
'S' : 167,
'wr' : 8768,
'wp' : 8472,
'backcong' : 8780,
'lasp' : 701,
'c' : 807,
'nabla' : 8711,
'dotplus' : 8724,
'eta' : 951,
'forall' : 8704,
'eth' : 240,
'colon' : 58,
'sqcup' : 8852,
'rightrightarrows' : 8649,
'sqsupset' : 8848,
'mapsto' : 8614,
'bigtriangledown' : 9661,
'sqsupseteq' : 8850,
'propto' : 8733,
'pi' : 960,
'pm' : 177,
'dots' : 0x2026,
'nrightarrow' : 8603,
'textasciiacute' : 180,
'Doteq' : 8785,
'breve' : 774,
'sqcap' : 8851,
'twoheadrightarrow' : 8608,
'kappa' : 954,
'vartriangle' : 9653,
'diamondsuit' : 9826,
'pitchfork' : 8916,
'blacktriangleleft' : 9664,
'nprec' : 8832,
'vdots' : 8942,
'curvearrowright' : 8631,
'barwedge' : 8892,
'multimap' : 8888,
'textquestiondown' : 191,
'cong' : 8773,
'rtimes' : 8906,
'rightzigzagarrow' : 8669,
'rightarrow' : 8594,
'leftarrow' : 8592,
'__sqrt__' : 8730,
'twoheaddownarrow' : 8609,
'oint' : 8750,
'bigvee' : 8897,
'eqdef' : 8797,
'sterling' : 163,
'phi' : 981,
'Updownarrow' : 8661,
'backprime' : 8245,
'emdash' : 8212,
'Gamma' : 915,
'i' : 305,
'rceil' : 8969,
'leftharpoonup' : 8636,
'Im' : 8465,
'curvearrowleft' : 8630,
'wedgeq' : 8793,
'fallingdotseq' : 8786,
'curlyeqprec' : 8926,
'questeq' : 8799,
'less' : 60,
'upuparrows' : 8648,
'tilde' : 771,
'textasciigrave' : 96,
'smallsetminus' : 8726,
'ell' : 8467,
'cup' : 8746,
'danger' : 9761,
'nVDash' : 8879,
'cdotp' : 183,
'cdots' : 8943,
'hat' : 770,
'eqgtr' : 8925,
'enspace' : 8194,
'psi' : 968,
'frown' : 8994,
'acute' : 769,
'downzigzagarrow' : 8623,
'ntriangleright' : 8939,
'cupdot' : 8845,
'circleddash' : 8861,
'oslash' : 8856,
'mho' : 8487,
'd' : 803,
'sqsubset' : 8847,
'cdot' : 8901,
'Omega' : 937,
'OE' : 338,
'veeeq' : 8794,
'Finv' : 8498,
't' : 865,
'leftrightarrow' : 8596,
'swarrow' : 8601,
'rightthreetimes' : 8908,
'rightleftharpoons' : 8652,
'lesssim' : 8818,
'searrow' : 8600,
'because' : 8757,
'gtrless' : 8823,
'star' : 8902,
'nsubset' : 8836,
'zeta' : 950,
'dddot' : 8411,
'bigcirc' : 9675,
'Supset' : 8913,
'circ' : 8728,
'slash' : 8725,
'ocirc' : 778,
'prod' : 8719,
'twoheadleftarrow' : 8606,
'daleth' : 8504,
'upharpoonright' : 8638,
'odot' : 8857,
'Uparrow' : 8657,
'O' : 216,
'hookleftarrow' : 8617,
'trianglerighteq' : 8885,
'nsime' : 8772,
'oe' : 339,
'nwarrow' : 8598,
'o' : 248,
'ddddot' : 8412,
'downharpoonright' : 8642,
'succcurlyeq' : 8829,
'gamma' : 947,
'scrR' : 8475,
'dag' : 8224,
'thickspace' : 8197,
'frakZ' : 8488,
'lessdot' : 8918,
'triangledown' : 9663,
'ltimes' : 8905,
'scrB' : 8492,
'endash' : 8211,
'scrE' : 8496,
'scrF' : 8497,
'scrH' : 8459,
'scrI' : 8464,
'rightharpoondown' : 8641,
'scrL' : 8466,
'scrM' : 8499,
'frakC' : 8493,
'nsupseteq' : 8841,
'circledR' : 174,
'circledS' : 9416,
'ngtr' : 8815,
'bigcap' : 8898,
'scre' : 8495,
'Downarrow' : 8659,
'scrg' : 8458,
'overleftrightarrow' : 8417,
'scro' : 8500,
'lnsim' : 8934,
'eqcolon' : 8789,
'curlyvee' : 8910,
'urcorner' : 8989,
'lbrace' : 123,
'Bumpeq' : 8782,
'delta' : 948,
'boxtimes' : 8864,
'overleftarrow' : 8406,
'prurel' : 8880,
'clubsuitopen' : 9831,
'cwopencirclearrow' : 8635,
'geqq' : 8807,
'rightleftarrows' : 8644,
'ac' : 8766,
'ae' : 230,
'int' : 8747,
'rfloor' : 8971,
'risingdotseq' : 8787,
'nvdash' : 8876,
'diamond' : 8900,
'ddot' : 776,
'backsim' : 8765,
'oplus' : 8853,
'triangleq' : 8796,
'check' : 780,
'ni' : 8715,
'iiint' : 8749,
'ne' : 8800,
'lesseqgtr' : 8922,
'obar' : 9021,
'supseteq' : 8839,
'nu' : 957,
'AA' : 8491,
'AE' : 198,
'models' : 8871,
'ominus' : 8854,
'dashv' : 8867,
'omega' : 969,
'rq' : 8217,
'Subset' : 8912,
'rightharpoonup' : 8640,
'Rdsh' : 8627,
'bullet' : 8729,
'divideontimes' : 8903,
'lbrack' : 91,
'textquotedblright' : 8221,
'Colon' : 8759,
'%' : 37,
'$' : 36,
'{' : 123,
'}' : 125,
'_' : 95,
'#' : 35,
'imath' : 0x131,
'circumflexaccent' : 770,
'combiningbreve' : 774,
'combiningoverline' : 772,
'combininggraveaccent' : 768,
'combiningacuteaccent' : 769,
'combiningdiaeresis' : 776,
'combiningtilde' : 771,
'combiningrightarrowabove' : 8407,
'combiningdotabove' : 775,
'to' : 8594,
'succeq' : 8829,
'emptyset' : 8709,
'leftparen' : 40,
'rightparen' : 41,
'bigoplus' : 10753,
'leftangle' : 10216,
'rightangle' : 10217,
'leftbrace' : 124,
'rightbrace' : 125,
'jmath' : 567,
'bigodot' : 10752,
'preceq' : 8828,
'biguplus' : 10756,
'epsilon' : 949,
'vartheta' : 977,
'bigotimes' : 10754,
'guillemotleft' : 171,
'ring' : 730,
'Thorn' : 222,
'guilsinglright' : 8250,
'perthousand' : 8240,
'macron' : 175,
'cent' : 162,
'guillemotright' : 187,
'equal' : 61,
'asterisk' : 42,
'guilsinglleft' : 8249,
'plus' : 43,
'thorn' : 254,
'dagger' : 8224
}
# Each element is a 4-tuple of the form:
# src_start, src_end, dst_font, dst_start
#
stix_virtual_fonts = {
'bb':
{
'rm':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'rm', 0x1d538), # A-B
(0x0043, 0x0043, 'rm', 0x2102), # C
(0x0044, 0x0047, 'rm', 0x1d53b), # D-G
(0x0048, 0x0048, 'rm', 0x210d), # H
(0x0049, 0x004d, 'rm', 0x1d540), # I-M
(0x004e, 0x004e, 'rm', 0x2115), # N
(0x004f, 0x004f, 'rm', 0x1d546), # O
(0x0050, 0x0051, 'rm', 0x2119), # P-Q
(0x0052, 0x0052, 'rm', 0x211d), # R
(0x0053, 0x0059, 'rm', 0x1d54a), # S-Y
(0x005a, 0x005a, 'rm', 0x2124), # Z
(0x0061, 0x007a, 'rm', 0x1d552), # a-z
(0x0393, 0x0393, 'rm', 0x213e), # \Gamma
(0x03a0, 0x03a0, 'rm', 0x213f), # \Pi
(0x03a3, 0x03a3, 'rm', 0x2140), # \Sigma
(0x03b3, 0x03b3, 'rm', 0x213d), # \gamma
(0x03c0, 0x03c0, 'rm', 0x213c), # \pi
],
'it':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'it', 0xe154), # A-B
(0x0043, 0x0043, 'it', 0x2102), # C
(0x0044, 0x0044, 'it', 0x2145), # D
(0x0045, 0x0047, 'it', 0xe156), # E-G
(0x0048, 0x0048, 'it', 0x210d), # H
(0x0049, 0x004d, 'it', 0xe159), # I-M
(0x004e, 0x004e, 'it', 0x2115), # N
(0x004f, 0x004f, 'it', 0xe15e), # O
(0x0050, 0x0051, 'it', 0x2119), # P-Q
(0x0052, 0x0052, 'it', 0x211d), # R
(0x0053, 0x0059, 'it', 0xe15f), # S-Y
(0x005a, 0x005a, 'it', 0x2124), # Z
(0x0061, 0x0063, 'it', 0xe166), # a-c
(0x0064, 0x0065, 'it', 0x2146), # d-e
(0x0066, 0x0068, 'it', 0xe169), # f-h
(0x0069, 0x006a, 'it', 0x2148), # i-j
(0x006b, 0x007a, 'it', 0xe16c), # k-z
(0x0393, 0x0393, 'it', 0x213e), # \Gamma (missing in beta STIX fonts)
(0x03a0, 0x03a0, 'it', 0x213f), # \Pi
(0x03a3, 0x03a3, 'it', 0x2140), # \Sigma (missing in beta STIX fonts)
(0x03b3, 0x03b3, 'it', 0x213d), # \gamma (missing in beta STIX fonts)
(0x03c0, 0x03c0, 'it', 0x213c), # \pi
],
'bf':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'bf', 0xe38a), # A-B
(0x0043, 0x0043, 'bf', 0x2102), # C
(0x0044, 0x0044, 'bf', 0x2145), # D
(0x0045, 0x0047, 'bf', 0xe38d), # E-G
(0x0048, 0x0048, 'bf', 0x210d), # H
(0x0049, 0x004d, 'bf', 0xe390), # I-M
(0x004e, 0x004e, 'bf', 0x2115), # N
(0x004f, 0x004f, 'bf', 0xe395), # O
(0x0050, 0x0051, 'bf', 0x2119), # P-Q
(0x0052, 0x0052, 'bf', 0x211d), # R
(0x0053, 0x0059, 'bf', 0xe396), # S-Y
(0x005a, 0x005a, 'bf', 0x2124), # Z
(0x0061, 0x0063, 'bf', 0xe39d), # a-c
(0x0064, 0x0065, 'bf', 0x2146), # d-e
(0x0066, 0x0068, 'bf', 0xe3a2), # f-h
(0x0069, 0x006a, 'bf', 0x2148), # i-j
(0x006b, 0x007a, 'bf', 0xe3a7), # k-z
(0x0393, 0x0393, 'bf', 0x213e), # \Gamma
(0x03a0, 0x03a0, 'bf', 0x213f), # \Pi
(0x03a3, 0x03a3, 'bf', 0x2140), # \Sigma
(0x03b3, 0x03b3, 'bf', 0x213d), # \gamma
(0x03c0, 0x03c0, 'bf', 0x213c), # \pi
],
},
'cal':
[
(0x0041, 0x005a, 'it', 0xe22d), # A-Z
],
'circled':
{
'rm':
[
(0x0030, 0x0030, 'rm', 0x24ea), # 0
(0x0031, 0x0039, 'rm', 0x2460), # 1-9
(0x0041, 0x005a, 'rm', 0x24b6), # A-Z
(0x0061, 0x007a, 'rm', 0x24d0) # a-z
],
'it':
[
(0x0030, 0x0030, 'rm', 0x24ea), # 0
(0x0031, 0x0039, 'rm', 0x2460), # 1-9
(0x0041, 0x005a, 'it', 0x24b6), # A-Z
(0x0061, 0x007a, 'it', 0x24d0) # a-z
],
'bf':
[
(0x0030, 0x0030, 'bf', 0x24ea), # 0
(0x0031, 0x0039, 'bf', 0x2460), # 1-9
(0x0041, 0x005a, 'bf', 0x24b6), # A-Z
(0x0061, 0x007a, 'bf', 0x24d0) # a-z
],
},
'frak':
{
'rm':
[
(0x0041, 0x0042, 'rm', 0x1d504), # A-B
(0x0043, 0x0043, 'rm', 0x212d), # C
(0x0044, 0x0047, 'rm', 0x1d507), # D-G
(0x0048, 0x0048, 'rm', 0x210c), # H
(0x0049, 0x0049, 'rm', 0x2111), # I
(0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
(0x0052, 0x0052, 'rm', 0x211c), # R
(0x0053, 0x0059, 'rm', 0x1d516), # S-Y
(0x005a, 0x005a, 'rm', 0x2128), # Z
(0x0061, 0x007a, 'rm', 0x1d51e), # a-z
],
'it':
[
(0x0041, 0x0042, 'rm', 0x1d504), # A-B
(0x0043, 0x0043, 'rm', 0x212d), # C
(0x0044, 0x0047, 'rm', 0x1d507), # D-G
(0x0048, 0x0048, 'rm', 0x210c), # H
(0x0049, 0x0049, 'rm', 0x2111), # I
(0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
(0x0052, 0x0052, 'rm', 0x211c), # R
(0x0053, 0x0059, 'rm', 0x1d516), # S-Y
(0x005a, 0x005a, 'rm', 0x2128), # Z
(0x0061, 0x007a, 'rm', 0x1d51e), # a-z
],
'bf':
[
(0x0041, 0x005a, 'bf', 0x1d56c), # A-Z
(0x0061, 0x007a, 'bf', 0x1d586), # a-z
],
},
'scr':
[
(0x0041, 0x0041, 'it', 0x1d49c), # A
(0x0042, 0x0042, 'it', 0x212c), # B
(0x0043, 0x0044, 'it', 0x1d49e), # C-D
(0x0045, 0x0046, 'it', 0x2130), # E-F
(0x0047, 0x0047, 'it', 0x1d4a2), # G
(0x0048, 0x0048, 'it', 0x210b), # H
(0x0049, 0x0049, 'it', 0x2110), # I
(0x004a, 0x004b, 'it', 0x1d4a5), # J-K
(0x004c, 0x004c, 'it', 0x2112), # L
(0x004d, 0x003d, 'it', 0x2133), # M
(0x004e, 0x0051, 'it', 0x1d4a9), # N-Q
(0x0052, 0x0052, 'it', 0x211b), # R
(0x0053, 0x005a, 'it', 0x1d4ae), # S-Z
(0x0061, 0x0064, 'it', 0x1d4b6), # a-d
(0x0065, 0x0065, 'it', 0x212f), # e
(0x0066, 0x0066, 'it', 0x1d4bb), # f
(0x0067, 0x0067, 'it', 0x210a), # g
(0x0068, 0x006e, 'it', 0x1d4bd), # h-n
(0x006f, 0x006f, 'it', 0x2134), # o
(0x0070, 0x007a, 'it', 0x1d4c5), # p-z
],
'sf':
{
'rm':
[
(0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
(0x0041, 0x005a, 'rm', 0x1d5a0), # A-Z
(0x0061, 0x007a, 'rm', 0x1d5ba), # a-z
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
(0x03b1, 0x03c9, 'rm', 0xe196), # \alpha-\omega
(0x03d1, 0x03d1, 'rm', 0xe1b0), # theta variant
(0x03d5, 0x03d5, 'rm', 0xe1b1), # phi variant
(0x03d6, 0x03d6, 'rm', 0xe1b3), # pi variant
(0x03f1, 0x03f1, 'rm', 0xe1b2), # rho variant
(0x03f5, 0x03f5, 'rm', 0xe1af), # lunate epsilon
(0x2202, 0x2202, 'rm', 0xe17c), # partial differential
],
'it':
[
# These numerals are actually upright. We don't actually
# want italic numerals ever.
(0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
(0x0041, 0x005a, 'it', 0x1d608), # A-Z
(0x0061, 0x007a, 'it', 0x1d622), # a-z
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
(0x03b1, 0x03c9, 'it', 0xe1d8), # \alpha-\omega
(0x03d1, 0x03d1, 'it', 0xe1f2), # theta variant
(0x03d5, 0x03d5, 'it', 0xe1f3), # phi variant
(0x03d6, 0x03d6, 'it', 0xe1f5), # pi variant
(0x03f1, 0x03f1, 'it', 0xe1f4), # rho variant
(0x03f5, 0x03f5, 'it', 0xe1f1), # lunate epsilon
],
'bf':
[
(0x0030, 0x0039, 'bf', 0x1d7ec), # 0-9
(0x0041, 0x005a, 'bf', 0x1d5d4), # A-Z
(0x0061, 0x007a, 'bf', 0x1d5ee), # a-z
(0x0391, 0x03a9, 'bf', 0x1d756), # \Alpha-\Omega
(0x03b1, 0x03c9, 'bf', 0x1d770), # \alpha-\omega
(0x03d1, 0x03d1, 'bf', 0x1d78b), # theta variant
(0x03d5, 0x03d5, 'bf', 0x1d78d), # phi variant
(0x03d6, 0x03d6, 'bf', 0x1d78f), # pi variant
(0x03f0, 0x03f0, 'bf', 0x1d78c), # kappa variant
(0x03f1, 0x03f1, 'bf', 0x1d78e), # rho variant
(0x03f5, 0x03f5, 'bf', 0x1d78a), # lunate epsilon
(0x2202, 0x2202, 'bf', 0x1d789), # partial differential
(0x2207, 0x2207, 'bf', 0x1d76f), # \Nabla
],
},
'tt':
[
(0x0030, 0x0039, 'rm', 0x1d7f6), # 0-9
(0x0041, 0x005a, 'rm', 0x1d670), # A-Z
(0x0061, 0x007a, 'rm', 0x1d68a) # a-z
],
}
| SpaceKatt/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/_mathtext_data.py | Python | gpl-3.0 | 89,917 | [
"Bowtie"
] | 5eab7e8f6e7c8192a720764420f7c480d0996fc98435d1504d1d107d3dad5e3d |
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cloud
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of Cloud Avi RESTful Object
description:
- This module is used to configure Cloud object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
apic_configuration:
description:
- Apicconfiguration settings for cloud.
apic_mode:
description:
- Boolean flag to set apic_mode.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
aws_configuration:
description:
- Awsconfiguration settings for cloud.
azure_configuration:
description:
- Field introduced in 17.2.1.
version_added: "2.5"
cloudstack_configuration:
description:
- Cloudstackconfiguration settings for cloud.
custom_tags:
description:
- Custom tags for all avi created resources in the cloud infrastructure.
- Field introduced in 17.1.5.
version_added: "2.5"
dhcp_enabled:
description:
- Select the ip address management scheme.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
dns_provider_ref:
description:
- Dns profile for the cloud.
- It is a reference to an object of type ipamdnsproviderprofile.
docker_configuration:
description:
- Dockerconfiguration settings for cloud.
east_west_dns_provider_ref:
description:
- Dns profile for east-west services.
- It is a reference to an object of type ipamdnsproviderprofile.
east_west_ipam_provider_ref:
description:
- Ipam profile for east-west services.
- Warning - please use virtual subnets in this ipam profile that do not conflict with the underlay networks or any overlay networks in the cluster.
- For example in aws and gcp, 169.254.0.0/16 is used for storing instance metadata.
- Hence, it should not be used in this profile.
- It is a reference to an object of type ipamdnsproviderprofile.
enable_vip_static_routes:
description:
- Use static routes for vip side network resolution during virtualservice placement.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
ipam_provider_ref:
description:
- Ipam profile for the cloud.
- It is a reference to an object of type ipamdnsproviderprofile.
license_tier:
description:
- Specifies the default license tier which would be used by new se groups.
- This field by default inherits the value from system configuration.
- Enum options - ENTERPRISE_16, ENTERPRISE_18.
- Field introduced in 17.2.5.
version_added: "2.5"
license_type:
description:
- If no license type is specified then default license enforcement for the cloud type is chosen.
- The default mappings are container cloud is max ses, openstack and vmware is cores and linux it is sockets.
- Enum options - LIC_BACKEND_SERVERS, LIC_SOCKETS, LIC_CORES, LIC_HOSTS, LIC_SE_BANDWIDTH.
linuxserver_configuration:
description:
- Linuxserverconfiguration settings for cloud.
mesos_configuration:
description:
- Mesosconfiguration settings for cloud.
mtu:
description:
- Mtu setting for the cloud.
- Default value when not specified in API or module is interpreted by Avi Controller as 1500.
- Units(BYTES).
name:
description:
- Name of the object.
required: true
nsx_configuration:
description:
- Configuration parameters for nsx manager.
- Field introduced in 17.1.1.
obj_name_prefix:
description:
- Default prefix for all automatically created objects in this cloud.
- This prefix can be overridden by the se-group template.
openstack_configuration:
description:
- Openstackconfiguration settings for cloud.
oshiftk8s_configuration:
description:
- Oshiftk8sconfiguration settings for cloud.
prefer_static_routes:
description:
- Prefer static routes over interface routes during virtualservice placement.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
proxy_configuration:
description:
- Proxyconfiguration settings for cloud.
rancher_configuration:
description:
- Rancherconfiguration settings for cloud.
state_based_dns_registration:
description:
- Dns records for vips are added/deleted based on the operational state of the vips.
- Field introduced in 17.1.12.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
version_added: "2.5"
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
vca_configuration:
description:
- Vcloudairconfiguration settings for cloud.
vcenter_configuration:
description:
- Vcenterconfiguration settings for cloud.
vtype:
description:
- Cloud type.
- Enum options - CLOUD_NONE, CLOUD_VCENTER, CLOUD_OPENSTACK, CLOUD_AWS, CLOUD_VCA, CLOUD_APIC, CLOUD_MESOS, CLOUD_LINUXSERVER, CLOUD_DOCKER_UCP,
- CLOUD_RANCHER, CLOUD_OSHIFT_K8S, CLOUD_AZURE.
- Default value when not specified in API or module is interpreted by Avi Controller as CLOUD_NONE.
required: true
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create a VMWare cloud with write access mode
avi_cloud:
username: '{{ username }}'
controller: '{{ controller }}'
password: '{{ password }}'
apic_mode: false
dhcp_enabled: true
enable_vip_static_routes: false
license_type: LIC_CORES
mtu: 1500
name: VCenter Cloud
prefer_static_routes: false
tenant_ref: admin
vcenter_configuration:
datacenter_ref: /api/vimgrdcruntime/datacenter-2-10.10.20.100
management_network: /api/vimgrnwruntime/dvportgroup-103-10.10.20.100
password: password
privilege: WRITE_ACCESS
username: user
vcenter_url: 10.10.20.100
vtype: CLOUD_VCENTER
"""
RETURN = '''
obj:
description: Cloud (api/cloud) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
apic_configuration=dict(type='dict',),
apic_mode=dict(type='bool',),
aws_configuration=dict(type='dict',),
azure_configuration=dict(type='dict',),
cloudstack_configuration=dict(type='dict',),
custom_tags=dict(type='list',),
dhcp_enabled=dict(type='bool',),
dns_provider_ref=dict(type='str',),
docker_configuration=dict(type='dict',),
east_west_dns_provider_ref=dict(type='str',),
east_west_ipam_provider_ref=dict(type='str',),
enable_vip_static_routes=dict(type='bool',),
ipam_provider_ref=dict(type='str',),
license_tier=dict(type='str',),
license_type=dict(type='str',),
linuxserver_configuration=dict(type='dict',),
mesos_configuration=dict(type='dict',),
mtu=dict(type='int',),
name=dict(type='str', required=True),
nsx_configuration=dict(type='dict',),
obj_name_prefix=dict(type='str',),
openstack_configuration=dict(type='dict',),
oshiftk8s_configuration=dict(type='dict',),
prefer_static_routes=dict(type='bool',),
proxy_configuration=dict(type='dict',),
rancher_configuration=dict(type='dict',),
state_based_dns_registration=dict(type='bool',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
vca_configuration=dict(type='dict',),
vcenter_configuration=dict(type='dict',),
vtype=dict(type='str', required=True),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'cloud',
set([]))
if __name__ == '__main__':
main()
| le9i0nx/ansible | lib/ansible/modules/network/avi/avi_cloud.py | Python | gpl-3.0 | 11,134 | [
"VisIt"
] | 28d1299bec0a82b980ca2e44507ae61a160c2f1d48c278e835e47bebd6c6e0ca |
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Unit tests for the :class:`iris.experimental.ugrid.cf.CFUGridAuxiliaryCoordinateVariable` class.
todo: fold these tests into cf tests when experimental.ugrid is folded into
standard behaviour.
"""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests # isort:skip
import numpy as np
from iris.experimental.ugrid.cf import (
CFUGridAuxiliaryCoordinateVariable,
logger,
)
from iris.tests.unit.experimental.ugrid.cf.test_CFUGridReader import (
netcdf_ugrid_variable,
)
def named_variable(name):
# Don't need to worry about dimensions or dtype for these tests.
return netcdf_ugrid_variable(name, "", int)
class TestIdentify(tests.IrisTest):
def setUp(self):
self.cf_identities = [
"node_coordinates",
"edge_coordinates",
"face_coordinates",
"volume_coordinates",
]
def test_cf_identities(self):
subject_name = "ref_subject"
ref_subject = named_variable(subject_name)
vars_common = {
subject_name: ref_subject,
"ref_not_subject": named_variable("ref_not_subject"),
}
# ONLY expecting ref_subject, excluding ref_not_subject.
expected = {
subject_name: CFUGridAuxiliaryCoordinateVariable(
subject_name, ref_subject
)
}
for identity in self.cf_identities:
ref_source = named_variable("ref_source")
setattr(ref_source, identity, subject_name)
vars_all = dict({"ref_source": ref_source}, **vars_common)
result = CFUGridAuxiliaryCoordinateVariable.identify(vars_all)
self.assertDictEqual(expected, result)
def test_duplicate_refs(self):
subject_name = "ref_subject"
ref_subject = named_variable(subject_name)
ref_source_vars = {
name: named_variable(name)
for name in ("ref_source_1", "ref_source_2")
}
for var in ref_source_vars.values():
setattr(var, self.cf_identities[0], subject_name)
vars_all = dict(
{
subject_name: ref_subject,
"ref_not_subject": named_variable("ref_not_subject"),
},
**ref_source_vars,
)
# ONLY expecting ref_subject, excluding ref_not_subject.
expected = {
subject_name: CFUGridAuxiliaryCoordinateVariable(
subject_name, ref_subject
)
}
result = CFUGridAuxiliaryCoordinateVariable.identify(vars_all)
self.assertDictEqual(expected, result)
def test_two_coords(self):
subject_names = ("ref_subject_1", "ref_subject_2")
ref_subject_vars = {
name: named_variable(name) for name in subject_names
}
ref_source_vars = {
name: named_variable(name)
for name in ("ref_source_1", "ref_source_2")
}
for ix, var in enumerate(ref_source_vars.values()):
setattr(var, self.cf_identities[ix], subject_names[ix])
vars_all = dict(
{"ref_not_subject": named_variable("ref_not_subject")},
**ref_subject_vars,
**ref_source_vars,
)
# Not expecting ref_not_subject.
expected = {
name: CFUGridAuxiliaryCoordinateVariable(name, var)
for name, var in ref_subject_vars.items()
}
result = CFUGridAuxiliaryCoordinateVariable.identify(vars_all)
self.assertDictEqual(expected, result)
def test_two_part_ref(self):
subject_names = ("ref_subject_1", "ref_subject_2")
ref_subject_vars = {
name: named_variable(name) for name in subject_names
}
ref_source = named_variable("ref_source")
setattr(ref_source, self.cf_identities[0], " ".join(subject_names))
vars_all = {
"ref_not_subject": named_variable("ref_not_subject"),
"ref_source": ref_source,
**ref_subject_vars,
}
expected = {
name: CFUGridAuxiliaryCoordinateVariable(name, var)
for name, var in ref_subject_vars.items()
}
result = CFUGridAuxiliaryCoordinateVariable.identify(vars_all)
self.assertDictEqual(expected, result)
def test_string_type_ignored(self):
subject_name = "ref_subject"
ref_source = named_variable("ref_source")
setattr(ref_source, self.cf_identities[0], subject_name)
vars_all = {
subject_name: netcdf_ugrid_variable(subject_name, "", np.bytes_),
"ref_not_subject": named_variable("ref_not_subject"),
"ref_source": ref_source,
}
result = CFUGridAuxiliaryCoordinateVariable.identify(vars_all)
self.assertDictEqual({}, result)
def test_ignore(self):
subject_names = ("ref_subject_1", "ref_subject_2")
ref_subject_vars = {
name: named_variable(name) for name in subject_names
}
ref_source_vars = {
name: named_variable(name)
for name in ("ref_source_1", "ref_source_2")
}
for ix, var in enumerate(ref_source_vars.values()):
setattr(var, self.cf_identities[0], subject_names[ix])
vars_all = dict(
{"ref_not_subject": named_variable("ref_not_subject")},
**ref_subject_vars,
**ref_source_vars,
)
# ONLY expect the subject variable that hasn't been ignored.
expected_name = subject_names[0]
expected = {
expected_name: CFUGridAuxiliaryCoordinateVariable(
expected_name, ref_subject_vars[expected_name]
)
}
result = CFUGridAuxiliaryCoordinateVariable.identify(
vars_all, ignore=subject_names[1]
)
self.assertDictEqual(expected, result)
def test_target(self):
subject_names = ("ref_subject_1", "ref_subject_2")
ref_subject_vars = {
name: named_variable(name) for name in subject_names
}
source_names = ("ref_source_1", "ref_source_2")
ref_source_vars = {name: named_variable(name) for name in source_names}
for ix, var in enumerate(ref_source_vars.values()):
setattr(var, self.cf_identities[0], subject_names[ix])
vars_all = dict(
{"ref_not_subject": named_variable("ref_not_subject")},
**ref_subject_vars,
**ref_source_vars,
)
# ONLY expect the variable referenced by the named ref_source_var.
expected_name = subject_names[0]
expected = {
expected_name: CFUGridAuxiliaryCoordinateVariable(
expected_name, ref_subject_vars[expected_name]
)
}
result = CFUGridAuxiliaryCoordinateVariable.identify(
vars_all, target=source_names[0]
)
self.assertDictEqual(expected, result)
def test_warn(self):
subject_name = "ref_subject"
ref_source = named_variable("ref_source")
setattr(ref_source, self.cf_identities[0], subject_name)
vars_all = {
"ref_not_subject": named_variable("ref_not_subject"),
"ref_source": ref_source,
}
# The warn kwarg and expected corresponding log level.
warn_and_level = {True: "WARNING", False: "DEBUG"}
# Missing warning.
log_regex = rf"Missing CF-netCDF auxiliary coordinate variable {subject_name}.*"
for warn, level in warn_and_level.items():
with self.assertLogs(logger, level=level, msg_regex=log_regex):
result = CFUGridAuxiliaryCoordinateVariable.identify(
vars_all, warn=warn
)
self.assertDictEqual({}, result)
# String variable warning.
log_regex = r".*is a CF-netCDF label variable.*"
for warn, level in warn_and_level.items():
with self.assertLogs(logger, level=level, msg_regex=log_regex):
vars_all[subject_name] = netcdf_ugrid_variable(
subject_name, "", np.bytes_
)
result = CFUGridAuxiliaryCoordinateVariable.identify(
vars_all, warn=warn
)
self.assertDictEqual({}, result)
| SciTools/iris | lib/iris/tests/unit/experimental/ugrid/cf/test_CFUGridAuxiliaryCoordinateVariable.py | Python | lgpl-3.0 | 8,596 | [
"NetCDF"
] | fad4cb7244972af3a189587a92fb475e96ba40066c7fcd1867e36badceaa6562 |
import re
from .. import factories as f
from .. utils import create_user_verify_login
def get_captcha_value(html_body):
captcha_text = re.split(r'What is', html_body)[1]
main_text = re.split(r'\?', captcha_text)
a = main_text[0].strip().split(' ')
if a[1] == '+':
return int(a[0]) + int(a[2])
if a[1] == '-':
return int(a[0]) - int(a[2])
if a[1] in ['*', '×']:
return int(a[0]) * int(a[2])
def test_contact_page(base_url, browser, outbox):
url = base_url + '/contact/'
browser.visit(url)
browser.fill('name', 'Full Name')
browser.fill('email', 'test@test.org')
browser.fill('comments', 'test@test.org')
browser.fill('contact_number', 999911)
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('This field is required.')
browser.visit(url)
browser.fill('name', 'Full Name')
browser.fill('email', 'test@test.org')
browser.fill('comments', 'test@test.org')
browser.fill('contact_number', 999911)
captcha_value = get_captcha_value(browser.html)
browser.fill('captcha_0', captcha_value)
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('Contact Number should be of 10 digits')
browser.visit(url)
url = base_url + '/contact/'
browser.fill('name', 'Full Name')
browser.fill('email', 'test@test.org')
browser.fill('comments', 'test@test.org')
browser.fill('contact_number', 9999111111)
captcha_value = get_captcha_value(browser.html)
browser.fill('captcha_0', captcha_value)
browser.find_by_css('[type=submit]')[0].click()
# assert browser.is_text_present('Thank')
# ---------------- testing auto fill name and email -----------------------
f.create_usertype(slug='tutor', display_name='tutor')
user = create_user_verify_login(base_url, browser, outbox)
user.first_name = 'test'
user.last_name = 'testing'
user.save()
url = base_url + '/accounts/login/'
browser.visit(url)
browser.fill('login', user.email)
browser.fill('password', '123123')
browser.find_by_css('[type=submit]')[0].click()
url = base_url + '/contact/'
browser.visit(url)
name = browser.find_by_id('id_name').value
assert name == user.first_name + " " + user.last_name
email = browser.find_by_id('id_email').value
assert email == user.email
# -------------------After logging out---------------
url = base_url + '/accounts/logout/'
browser.visit(url)
assert 'Home | PythonExpress' in browser.title
url = base_url + '/contact/'
browser.visit(url)
name = browser.find_by_id('id_name').value
assert name == ''
email = browser.find_by_id('id_email').value
assert email == ''
| pythonindia/wye | tests/functional/test_contact_page.py | Python | mit | 2,764 | [
"VisIt"
] | e41f58e819a1530de646f681ab6850d171fcea12091bd7d31940ee6f9899f4c0 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Popularity Contest (popcontest) parser."""
from __future__ import unicode_literals
import unittest
from plaso.lib import definitions
from plaso.parsers import popcontest
from tests.parsers import test_lib
class PopularityContestUnitTest(test_lib.ParserTestCase):
  """Tests for the popcontest parser."""

  def testParse(self):
    """Tests the Parse function."""
    parser = popcontest.PopularityContestParser()
    storage_writer = self._ParseFile(['popcontest1.log'], parser)

    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 22)

    events = list(storage_writer.GetEvents())

    # Each entry: (event index, expected timestamp, expected timestamp
    # description, expected message, expected short message). The checks are
    # applied in list order, matching the original per-event assertions.
    expected_values = [
        (0, '2010-06-22 05:41:41.000000',
         definitions.TIME_DESCRIPTION_ADDED,
         'Session 0 start ID 12345678901234567890123456789012 '
         '[ARCH:i386 POPCONVER:1.38]',
         'Session 0 start'),
        (1, '2010-06-22 07:34:42.000000',
         definitions.TIME_DESCRIPTION_LAST_ACCESS,
         'mru [/usr/sbin/atd] package [at]',
         '/usr/sbin/atd'),
        (3, '2010-06-22 07:34:43.000000',
         definitions.TIME_DESCRIPTION_LAST_ACCESS,
         'mru [/usr/lib/python2.5/lib-dynload/_struct.so] '
         'package [python2.5-minimal]',
         '/usr/lib/python2.5/lib-dynload/_struct.so'),
        (5, '2010-05-30 05:26:20.000000',
         definitions.TIME_DESCRIPTION_LAST_ACCESS,
         'mru [/usr/bin/empathy] package [empathy] tag [RECENT-CTIME]',
         '/usr/bin/empathy'),
        (6, '2010-05-30 05:27:43.000000',
         definitions.TIME_DESCRIPTION_ENTRY_MODIFICATION,
         'mru [/usr/bin/empathy] package [empathy] tag [RECENT-CTIME]',
         '/usr/bin/empathy'),
        (11, '2010-05-12 07:58:33.000000',
         definitions.TIME_DESCRIPTION_LAST_ACCESS,
         'mru [/usr/bin/orca] package [gnome-orca] tag [OLD]',
         '/usr/bin/orca'),
        (13, '2010-06-22 05:41:41.000000',
         definitions.TIME_DESCRIPTION_ADDED,
         'Session 0 end',
         'Session 0 end'),
        (14, '2010-06-22 05:41:41.000000',
         definitions.TIME_DESCRIPTION_ADDED,
         'Session 1 start ID 12345678901234567890123456789012 '
         '[ARCH:i386 POPCONVER:1.38]',
         'Session 1 start'),
        (15, '2010-06-22 07:34:42.000000',
         definitions.TIME_DESCRIPTION_LAST_ACCESS,
         'mru [/super/cool/plasuz] package [plaso]',
         '/super/cool/plasuz'),
        (18, '2010-04-06 12:25:42.000000',
         definitions.TIME_DESCRIPTION_LAST_ACCESS,
         'mru [/super/cool/plasuz] package [miss_ctime]',
         '/super/cool/plasuz'),
        (19, '2010-05-12 07:58:33.000000',
         definitions.TIME_DESCRIPTION_LAST_ACCESS,
         'mru [/super/cóól] package [plaso] tag [WRONG_TAG]',
         '/super/cóól'),
        (21, '2010-06-22 05:41:41.000000',
         definitions.TIME_DESCRIPTION_ADDED,
         'Session 1 end',
         'Session 1 end')]

    for (event_index, timestamp_string, timestamp_description,
         expected_message, expected_short_message) in expected_values:
      event = events[event_index]
      self.CheckTimestamp(event.timestamp, timestamp_string)
      self.assertEqual(event.timestamp_desc, timestamp_description)

      event_data = self._GetEventDataOfEvent(storage_writer, event)
      self._TestGetMessageStrings(
          event_data, expected_message, expected_short_message)
# Allow running this test module directly with `python popcontest.py`.
if __name__ == '__main__':
  unittest.main()
| rgayon/plaso | tests/parsers/popcontest.py | Python | apache-2.0 | 6,523 | [
"ORCA"
] | aaabfb981b88cd72f8dc1dcf1167c25269d1346ed4b0082951457b548bd06018 |
import argparse
import requests
from twisted.internet import reactor
try:
import ConfigParser as configparser
except ImportError:
import configparser
from pipes.connector import MatchPlayer
# Command-line interface: every flag is optional and falls back to the value
# from config.ini (see get() below).
parser = argparse.ArgumentParser(description="connect to a casino")
parser.add_argument('--key', help="secret key for your bot")
parser.add_argument("--runtime", help="path to your bot's runtime")
parser.add_argument("--games", help="number of games to play before quitting",
                    type=int)

# Module-level configuration, read once at import time from the working
# directory. A missing config.ini is silently ignored by ConfigParser.read.
Config = configparser.ConfigParser()
Config.read('config.ini')
def get(param):
    """Return the value of *param* from the [Poker] section of config.ini."""
    value = Config.get('Poker', param)
    return value
class Server(object):
    """Holds the casino server API endpoint read from configuration."""

    def __init__(self):
        # Base URL of the casino HTTP API, e.g. "http://example.com".
        self.api = get('api')

    def __repr__(self):
        return "Server<%s>" % (self.api,)
class Bot(object):
    """A poker bot: its secret key, runtime command line and server-side info.

    Command-line arguments take precedence over config.ini values. After
    construction, ``self.info`` is either the dict returned by the server's
    ``/api/bot/<key>`` endpoint or ``None`` when the lookup failed.
    """

    def __init__(self, server, args):
        self.server = server
        # CLI flags override config.ini.
        if args.key:
            self.key = args.key
        else:
            self.key = get('key')
        if args.runtime:
            self.runtime = args.runtime
        else:
            self.runtime = get('runtime')
        # Runtime is stored as an argv-style list for subprocess use.
        self.runtime = self.runtime.split(" ")
        self.log_dir = get('log_dir')
        self.get_info()

    def get_info(self):
        """Fetch this bot's info from the server API into ``self.info``.

        Bug fix: previously ``self.info`` was only assigned on a successful
        lookup, so a failed lookup made ``main()`` crash with AttributeError
        at ``bot.info`` instead of printing the intended "Couldn't find your
        bot" message. It is now always defined (``None`` on failure).
        """
        self.info = None
        r = requests.get("{}/api/bot/{}".format(self.server.api, self.key))
        bot_json = r.json()
        if bot_json and bot_json.get('bot'):
            self.info = bot_json.get('bot')

    def __repr__(self):
        return "Bot<{} || {}>".format(
            self.key, self.runtime)
class GameCounter(object):
    """Plays matches one after another until the wanted count is reached."""

    def __init__(self, player, args):
        self.player = player
        self.played = 0
        # CLI flag wins; otherwise fall back to the config.ini value.
        if args.games:
            self.games_wanted = args.games
        else:
            self.games_wanted = int(get('games'))

    def play_or_quit(self):
        """Play one more game, or stop the reactor once the quota is met."""
        if self.played > self.games_wanted:
            reactor.stop()
            return
        self.played += 1
        print("Playing game #{} of {}".format(
            self.played, self.games_wanted
        ))
        # Re-registers itself as the completion callback for the next game.
        self.player.play(self.play_or_quit)
def print_banner(bot_info):
    """Print a login-success banner describing the bot's name, key and rank."""
    name = bot_info.get('name')
    key = bot_info.get('key')
    rank = bot_info.get('rank')
    skill = bot_info.get('skill')
    print("\n\nLogin succeeded. You are playing as:")
    print(" '{n}' (key={k}) ".format(n=name, k=key))
    print(" Currently ranked #{r} with a skill of {s}".format(r=rank, s=skill))
    print("\n")
def main(args):
    """Build the server/bot objects and run the match loop via Twisted."""
    server = Server()
    bot = Bot(server, args)
    # Guard clause: without valid server-side info there is nothing to play.
    if not bot.info:
        print("Couldn't find your bot - please check the key and try again")
        return
    print_banner(bot.info)
    player = MatchPlayer(server, bot)
    counter = GameCounter(player, args)
    reactor.callLater(0.5, counter.play_or_quit)
    reactor.run()
# Script entry point: parse CLI flags and hand off to main().
if __name__ == "__main__":
    args = parser.parse_args()
    main(args)
| gnmerritt/poker-plumbing | play.py | Python | mit | 2,768 | [
"CASINO"
] | 3746872351f7046d63f6efc89b9f8eed44a10e60a64f9f82bd78b1c507fa022f |
# ----------------------------------------------------------------------
# LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
# http://lammps.sandia.gov, Sandia National Laboratories
# Steve Plimpton, sjplimp@sandia.gov
#
# Copyright (2003) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
#
# See the README file in the top-level LAMMPS directory.
# -------------------------------------------------------------------------
# Python wrapper on LAMMPS library via ctypes
import sys,traceback,types
from ctypes import *
from os.path import dirname,abspath,join
from inspect import getsourcefile
class lammps:
  """Thin ctypes wrapper around the LAMMPS shared-library interface.

  All methods forward to the C functions exported by liblammps.so via
  ctypes; ``self.lmp`` is the opaque LAMMPS instance handle.
  """

  # detect if Python is using version of mpi4py that can pass a communicator
  has_mpi4py_v2 = False
  try:
    from mpi4py import MPI
    from mpi4py import __version__ as mpi4py_version
    if mpi4py_version.split('.')[0] == '2':
      has_mpi4py_v2 = True
  except:
    pass

  # create instance of LAMMPS
  def __init__(self, name="", cmdargs=None, ptr=None, comm=None):
    """Create (or wrap) a LAMMPS instance.

    Args:
      name: suffix of the shared library; "" loads liblammps.so,
            "g++" loads liblammps_g++.so, etc.
      cmdargs: optional list of LAMMPS command-line arguments.
      ptr: existing LAMMPS instance pointer when Python is embedded
           in a LAMMPS input script; if given, no new instance is made.
      comm: optional mpi4py communicator (requires mpi4py >= 2).
    """
    # determine module location
    modpath = dirname(abspath(getsourcefile(lambda: 0)))

    # load liblammps.so unless name is given
    #   e.g. if name = "g++", load liblammps_g++.so
    # try loading the LAMMPS shared object from the location of lammps.py
    # with an absolute path first (so that LD_LIBRARY_PATH does not need to
    # be set for regular installations), then fall back to a relative path.
    try:
      if not name:
        self.lib = CDLL(join(modpath, "liblammps.so"), RTLD_GLOBAL)
      else:
        self.lib = CDLL(join(modpath, "liblammps_%s.so" % name), RTLD_GLOBAL)
    except:
      if not name:
        self.lib = CDLL("liblammps.so", RTLD_GLOBAL)
      else:
        self.lib = CDLL("liblammps_%s.so" % name, RTLD_GLOBAL)

    # if no ptr provided, create an instance of LAMMPS.
    # We can pass an MPI communicator from mpi4py v2.0.0 and later;
    # otherwise the no_mpi call lets LAMMPS use MPI_COMM_WORLD.
    # If ptr is provided, we are embedding Python in a LAMMPS input
    # script: just convert it to a ctypes pointer and store it.
    if not ptr:
      # with mpi4py v2, can pass MPI communicator to LAMMPS.
      # Need to adjust for the type of MPI communicator object:
      # allow for int (like MPICH) or void* (like OpenMPI).
      if lammps.has_mpi4py_v2 and comm != None:
        if lammps.MPI._sizeof(lammps.MPI.Comm) == sizeof(c_int):
          MPI_Comm = c_int
        else:
          MPI_Comm = c_void_p

        narg = 0
        cargs = 0
        if cmdargs:
          cmdargs.insert(0, "lammps.py")
          narg = len(cmdargs)
          cargs = (c_char_p * narg)(*cmdargs)
          self.lib.lammps_open.argtypes = [c_int, c_char_p * narg, \
                                           MPI_Comm, c_void_p()]
        else:
          self.lib.lammps_open.argtypes = [c_int, c_int, \
                                           MPI_Comm, c_void_p()]
        self.lib.lammps_open.restype = None
        self.opened = 1
        self.lmp = c_void_p()
        comm_ptr = lammps.MPI._addressof(comm)
        comm_val = MPI_Comm.from_address(comm_ptr)
        self.lib.lammps_open(narg, cargs, comm_val, byref(self.lmp))
      else:
        self.opened = 1
        if cmdargs:
          cmdargs.insert(0, "lammps.py")
          narg = len(cmdargs)
          cargs = (c_char_p * narg)(*cmdargs)
          self.lmp = c_void_p()
          self.lib.lammps_open_no_mpi(narg, cargs, byref(self.lmp))
        else:
          self.lmp = c_void_p()
          self.lib.lammps_open_no_mpi(0, None, byref(self.lmp))
          # could use just this if LAMMPS lib interface supported it
          # self.lmp = self.lib.lammps_open_no_mpi(0,None)
    else:
      self.opened = 0
      # magic to convert ptr to ctypes ptr
      pythonapi.PyCObject_AsVoidPtr.restype = c_void_p
      pythonapi.PyCObject_AsVoidPtr.argtypes = [py_object]
      self.lmp = c_void_p(pythonapi.PyCObject_AsVoidPtr(ptr))

  def __del__(self):
    # Only close instances we opened ourselves (not embedded ones).
    if self.lmp and self.opened: self.lib.lammps_close(self.lmp)

  def close(self):
    """Shut down the LAMMPS instance if this wrapper opened it."""
    if self.opened: self.lib.lammps_close(self.lmp)
    self.lmp = None

  def version(self):
    """Return the numeric LAMMPS version."""
    return self.lib.lammps_version(self.lmp)

  def file(self, file):
    """Run an entire LAMMPS input script from a file path."""
    file = file.encode()
    self.lib.lammps_file(self.lmp, file)

  def command(self, cmd):
    """Run a single LAMMPS input command given as a string."""
    cmd = cmd.encode()
    self.lib.lammps_command(self.lmp, cmd)

  def extract_global(self, name, type):
    """Extract a global scalar; type 0 = int, 1 = double."""
    name = name.encode()
    if type == 0:
      self.lib.lammps_extract_global.restype = POINTER(c_int)
    elif type == 1:
      self.lib.lammps_extract_global.restype = POINTER(c_double)
    else: return None
    ptr = self.lib.lammps_extract_global(self.lmp, name)
    return ptr[0]

  def extract_atom(self, name, type):
    """Extract a per-atom pointer.

    type: 0 = int vector, 1 = int array, 2 = double vector,
          3 = double array. Returns a ctypes pointer, or None.
    """
    name = name.encode()
    if type == 0:
      self.lib.lammps_extract_atom.restype = POINTER(c_int)
    elif type == 1:
      self.lib.lammps_extract_atom.restype = POINTER(POINTER(c_int))
    elif type == 2:
      self.lib.lammps_extract_atom.restype = POINTER(c_double)
    elif type == 3:
      self.lib.lammps_extract_atom.restype = POINTER(POINTER(c_double))
    else: return None
    ptr = self.lib.lammps_extract_atom(self.lmp, name)
    return ptr

  def extract_compute(self, id, style, type):
    """Extract compute data; returns a scalar for type 0, else a pointer."""
    id = id.encode()
    if type == 0:
      if style > 0: return None
      self.lib.lammps_extract_compute.restype = POINTER(c_double)
      ptr = self.lib.lammps_extract_compute(self.lmp, id, style, type)
      return ptr[0]
    if type == 1:
      self.lib.lammps_extract_compute.restype = POINTER(c_double)
      ptr = self.lib.lammps_extract_compute(self.lmp, id, style, type)
      return ptr
    if type == 2:
      self.lib.lammps_extract_compute.restype = POINTER(POINTER(c_double))
      ptr = self.lib.lammps_extract_compute(self.lmp, id, style, type)
      return ptr
    return None

  # in case of global datum, free memory for 1 double via lammps_free()
  # double was allocated by library interface function
  def extract_fix(self, id, style, type, i=0, j=0):
    """Extract fix data; global scalars are copied and freed via lammps_free()."""
    # Bug fix: was `id = ide.encode()` — `ide` is undefined, so every call
    # raised NameError before reaching the library.
    id = id.encode()
    if style == 0:
      self.lib.lammps_extract_fix.restype = POINTER(c_double)
      ptr = self.lib.lammps_extract_fix(self.lmp, id, style, type, i, j)
      result = ptr[0]
      self.lib.lammps_free(ptr)
      return result
    elif (style == 1) or (style == 2):
      if type == 1:
        self.lib.lammps_extract_fix.restype = POINTER(c_double)
      elif type == 2:
        self.lib.lammps_extract_fix.restype = POINTER(POINTER(c_double))
      else:
        return None
      ptr = self.lib.lammps_extract_fix(self.lmp, id, style, type, i, j)
      return ptr
    else:
      return None

  # free memory for 1 double or 1 vector of doubles via lammps_free()
  # for vector, must copy nlocal returned values to local c_double vector
  # memory was allocated by library interface function
  def extract_variable(self, name, group, type):
    """Extract an equal-style (type 0) or atom-style (type 1) variable."""
    name = name.encode()
    group = group.encode()
    if type == 0:
      self.lib.lammps_extract_variable.restype = POINTER(c_double)
      ptr = self.lib.lammps_extract_variable(self.lmp, name, group)
      result = ptr[0]
      self.lib.lammps_free(ptr)
      return result
    if type == 1:
      self.lib.lammps_extract_global.restype = POINTER(c_int)
      nlocalptr = self.lib.lammps_extract_global(self.lmp, "nlocal")
      nlocal = nlocalptr[0]
      result = (c_double * nlocal)()
      self.lib.lammps_extract_variable.restype = POINTER(c_double)
      ptr = self.lib.lammps_extract_variable(self.lmp, name, group)
      # Bug fix: was `xrange`, which does not exist on Python 3 (the rest of
      # this file targets Python 3 via the .encode() calls).
      for i in range(nlocal): result[i] = ptr[i]
      self.lib.lammps_free(ptr)
      return result
    return None

  # set variable value
  # value is converted to string
  # returns 0 for success, -1 if failed
  def set_variable(self, name, value):
    """Set a string-style variable; returns 0 on success, -1 on failure."""
    name = name.encode()
    value = str(value).encode()
    # Bug fix: previously passed str(value) of the already-encoded bytes,
    # which sends the literal text "b'...'" to the library.
    return self.lib.lammps_set_variable(self.lmp, name, value)

  # return total number of atoms in system
  def get_natoms(self):
    """Return the total number of atoms in the system."""
    return self.lib.lammps_get_natoms(self.lmp)

  # return vector of atom properties gathered across procs, ordered by atom ID
  def gather_atoms(self, name, type, count):
    """Gather a per-atom property across procs, ordered by atom ID."""
    name = name.encode()
    natoms = self.lib.lammps_get_natoms(self.lmp)
    if type == 0:
      data = ((count * natoms) * c_int)()
      self.lib.lammps_gather_atoms(self.lmp, name, type, count, data)
    elif type == 1:
      data = ((count * natoms) * c_double)()
      self.lib.lammps_gather_atoms(self.lmp, name, type, count, data)
    else: return None
    return data

  # scatter vector of atom properties across procs, ordered by atom ID
  # assume vector is of correct type and length, as created by gather_atoms()
  def scatter_atoms(self, name, type, count, data):
    """Scatter a per-atom property across procs, ordered by atom ID."""
    name = name.encode()
    self.lib.lammps_scatter_atoms(self.lmp, name, type, count, data)
"LAMMPS"
] | 28ad9659ac2aec2b97ff8c267c8790dd04f71367a2b96b958d037d8219fb7224 |
"""Ranks cog.
Keep track of active members on the server.
"""
import logging
import os
import random
import MySQLdb # The use of MySQL is debatable, but will use it to incorporate CMPT 354 stuff.
import discord
from discord.ext import commands
from __main__ import send_cmd_help # pylint: disable=no-name-in-module
from .utils.dataIO import dataIO # pylint: disable=relative-beyond-top-level
# Requires checks utility from:
# https://github.com/Rapptz/RoboDanny/tree/master/cogs/utils
from .utils import checks # pylint: disable=relative-beyond-top-level
# Global variables
LOGGER = None  # Module-level logger; configured in setup() below.
SAVE_FOLDER = "data/lui-cogs/ranks/"  # Path to save folder (relative to the bot's working directory).
def checkFolder():
    """Used to create the data folder at first startup"""
    if os.path.exists(SAVE_FOLDER):
        return
    print("Creating " + SAVE_FOLDER + " folder...")
    os.makedirs(SAVE_FOLDER)
def checkFiles():
    """Used to initialize an empty JSON settings database at first startup"""
    base = {}
    # Both state files get the same empty-dict default.
    for fileName in ("settings.json", "lastspoke.json"):
        theFile = SAVE_FOLDER + fileName
        if not dataIO.is_valid_json(theFile):
            print("Creating default ranks " + fileName + "...")
            dataIO.save_json(theFile, base)
class Ranks:
    """Mee6-inspired guild rank management system.
    Not optimized for multi-guild deployments.
    """

    # Class constructor
    def __init__(self, bot):
        self.bot = bot
        checkFolder()
        checkFiles()
        # JSON-backed state: per-guild settings and per-user last-spoke times.
        self.settings = dataIO.load_json(SAVE_FOLDER + 'settings.json')
        self.lastspoke = dataIO.load_json(SAVE_FOLDER + 'lastspoke.json')

    ############
    # COMMANDS #
    ############

    # [p]levels
    @commands.command(name="levels", pass_context=True, no_pm=True)
    async def _ranksLevels(self, ctx):
        """Show the server ranking leaderboard"""
        # Execute a MySQL query to order and check.
        # NOTE(review): the guild id is interpolated with str.format rather
        # than a parameterized query; it comes from discord and is numeric,
        # but parameterization would be safer — TODO confirm.
        database = MySQLdb.connect(host=self.settings["mysql_host"],
                                   user=self.settings["mysql_username"],
                                   passwd=self.settings["mysql_password"])
        cursor = database.cursor()
        cursor.execute("SELECT userid, xp FROM renbot.xp WHERE guildid = {0} "
                       "order by xp desc limit 20".format(ctx.message.server.id))
        msg = ":information_source: **Ranks - Leaderboard (WIP)**\n```"
        rank = 1
        # Up to 20 rows are fetched, but only the first 10 members still in
        # the guild are displayed (users who left are skipped).
        for row in cursor.fetchall():
            # row[0]: userID
            # row[1]: xp
            userID = row[0]
            userObject = ctx.message.server.get_member(str(userID))
            exp = row[1]
            # Lookup the ID against the guild
            if userObject is None:
                continue
            msg += str(rank).ljust(3)
            msg += (str(userObject.display_name) + " ").ljust(23)
            msg += str(exp).rjust(10) + "\n"
            rank += 1
            if rank == 11:
                break
        msg += "```\n Full rankings at https://ren.injabie3.moe/ranks/"
        await self.bot.say(msg)
        cursor.close()
        database.close()

    # [p]rank
    @commands.command(name="rank", pass_context=True, no_pm=True)
    async def _ranksCheck(self, ctx, ofUser: discord.Member = None):  # pylint: disable=too-many-locals
        """Check your rank in the server."""
        if ofUser is None:
            ofUser = ctx.message.author

        # Execute a MySQL query to order and check.
        database = MySQLdb.connect(host=self.settings["mysql_host"],
                                   user=self.settings["mysql_username"],
                                   passwd=self.settings["mysql_password"])
        cursor = database.cursor()
        # Using query code from:
        # https://stackoverflow.com/questions/13566695/select-increment-counter-in-mysql
        # This code is now included in the stored procedure in the database.
        cursor.execute("CALL renbot.getUserInfo({0},{1})".format(
            ctx.message.server.id, ofUser.id))
        embed = discord.Embed()
        data = cursor.fetchone()  # Data from the database.
        try:
            LOGGER.info(data)
            rank = data[0]
            userID = data[1]
            level = data[2]
            levelXP = data[3]
            currentXP = data[4]
            totalXP = data[5]
            currentLevelXP = currentXP - totalXP
        except IndexError as error:
            # NOTE(review): if the procedure returns no row, fetchone() gives
            # None and data[0] raises TypeError, which this handler does not
            # catch — TODO confirm intended behavior.
            await self.bot.say("Something went wrong when checking your level. "
                               "Please notify the admin!")
            LOGGER.error(error)
            database.close()
            return
        userObject = ctx.message.server.get_member(str(userID))
        embed.set_author(name=userObject.display_name,
                         icon_url=userObject.avatar_url)
        embed.colour = discord.Colour.red()
        embed.add_field(name="Rank", value=int(rank))
        embed.add_field(name="Level", value=level)
        embed.add_field(name="Exp.", value="{0}/{1} (total {2})".format(
            currentLevelXP, levelXP, currentXP))
        embed.set_footer(text="Note: This EXP is different from Mee6.")
        await self.bot.say(embed=embed)
        database.close()

    @commands.group(name="ranks", pass_context=True, no_pm=True)
    async def _ranks(self, ctx):
        """Mee6-inspired guild rank management system. WIP"""
        # Display the help context menu
        if ctx.invoked_subcommand is None:
            await send_cmd_help(ctx)

    #######################
    # COMMANDS - SETTINGS #
    #######################
    # Ideally would be nice have this replaced by a web admin panel.

    # [p]ranks settings
    @_ranks.group(name="settings", pass_context=True, no_pm=True)
    @checks.serverowner()
    async def _settings(self, ctx):
        """Ranking system settings. Only server admins should see this."""
        if str(ctx.invoked_subcommand).lower() == "ranks settings":
            await send_cmd_help(ctx)

    # [p]ranks settings default
    @_settings.command(name="default", pass_context=True, no_pm=True)
    async def _settingsDefault(self, ctx):
        """Set default for max points and cooldown."""
        # NOTE(review): these defaults are only set in memory, not saved to
        # settings.json — they are lost on reload. TODO confirm intended.
        sid = ctx.message.server.id
        self.settings[sid] = {}
        self.settings[sid]["cooldown"] = 0
        self.settings[sid]["maxPoints"] = 25
        await self.bot.say(":information_source: **Ranks - Default:** Defaults set, run "
                           "`{}rank settings show` to verify the settings.".format(ctx.prefix))

    # [p]ranks settings show
    @_settings.command(name="show", pass_context=True, no_pm=True)
    async def _settingsShow(self, ctx):
        """Show current settings."""
        sid = ctx.message.server.id
        try:
            cooldown = self.settings[sid]["cooldown"]
            maxPoints = self.settings[sid]["maxPoints"]
        except KeyError:
            # Not set.
            await self.bot.say(":warning: **Ranks - Current Settings**: The server is "
                               "not configured! Please run `{}rank settings default` "
                               "first and try again.".format(ctx.prefix))
            return
        msg = ":information_source: **Ranks - Current Settings**:\n```"
        msg += "Cooldown time: {0} seconds.\n".format(cooldown)
        msg += "Maximum points: {0} points per eligible message```".format(maxPoints)
        await self.bot.say(msg)

    # [p]rank settings cooldown
    @_settings.command(name="cooldown", pass_context=True)
    async def _settingsCooldown(self, ctx, seconds: int):
        """Set the cooldown required between XP gains (in seconds)"""
        sid = ctx.message.server.id
        if seconds is None:
            await self.bot.say(":negative_squared_cross_mark: **Ranks - Cooldown**: "
                               "Please enter a time in seconds!")
            return
        if seconds < 0:
            await self.bot.say(":negative_squared_cross_mark: **Ranks - Cooldown**: "
                               "Please enter a valid time in seconds!")
            return
        # Save settings
        self.settings = dataIO.load_json(SAVE_FOLDER + 'settings.json')
        # Make sure the server id key exists.
        if sid not in self.settings.keys():
            self.settings[sid] = {}
        self.settings[sid]["cooldown"] = seconds
        dataIO.save_json(SAVE_FOLDER + 'settings.json', self.settings)
        await self.bot.say(":white_check_mark: **Ranks - Cooldown**: Set to {0} "
                           "seconds.".format(seconds))
        LOGGER.info("Cooldown changed by %s#%s (%s)",
                    ctx.message.author.name,
                    ctx.message.author.discriminator,
                    ctx.message.author.id)
        LOGGER.info("Cooldown set to %s seconds",
                    seconds)

    # [p]rank settings maxpoints
    @_settings.command(name="maxpoints", pass_context=True)
    async def _settingsMaxpoints(self, ctx, maxpoints: int = 25):
        """Set max points per eligible message. Defaults to 25 points."""
        sid = ctx.message.server.id
        if maxpoints < 0:
            await self.bot.say(":negative_squared_cross_mark: **Ranks - Max Points**: "
                               "Please enter a positive number.")
            return
        # Save settings
        self.settings = dataIO.load_json(SAVE_FOLDER + 'settings.json')
        # Make sure the server id key exists.
        if sid not in self.settings.keys():
            self.settings[sid] = {}
        self.settings[sid]["maxPoints"] = maxpoints
        dataIO.save_json(SAVE_FOLDER + 'settings.json', self.settings)
        await self.bot.say(":white_check_mark: **Ranks - Max Points**: Users can gain "
                           "up to {0} points per eligible message.".format(maxpoints))
        LOGGER.info("Maximum points changed by %s#%s (%s)",
                    ctx.message.author.name,
                    ctx.message.author.discriminator,
                    ctx.message.author.id)
        LOGGER.info("Maximum points per message set to %s.",
                    maxpoints)

    # [p]rank settings dbsetup
    @_settings.command(name="dbsetup", pass_context=True)
    @checks.serverowner()
    async def _settingsDbSetup(self, ctx):
        """Perform database set up. DO NOT USE if ranks is working."""
        await self.bot.say("MySQL Set up:\n"
                           "What is the host you wish to connect to?")
        host = await self.bot.wait_for_message(timeout=30,
                                               author=ctx.message.author,
                                               channel=ctx.message.channel)
        if host is None:
            await self.bot.say("No response received, not setting anything!")
            return

        await self.bot.say("What is the username you want to use to connect?")
        username = await self.bot.wait_for_message(timeout=30,
                                                   author=ctx.message.author,
                                                   channel=ctx.message.channel)
        if username is None:
            await self.bot.say("No response received, not setting anything!")
            return

        await self.bot.say("What is the password you want to use to connect? You "
                           "can use a dummy password and manually change it in the "
                           "JSON config later.")
        password = await self.bot.wait_for_message(timeout=30,
                                                   author=ctx.message.author,
                                                   channel=ctx.message.channel)
        if password is None:
            await self.bot.say("No response received, not setting anything!")
            return

        # Save settings
        self.settings = dataIO.load_json(SAVE_FOLDER + 'settings.json')
        self.settings["mysql_host"] = host.content
        self.settings["mysql_username"] = username.content
        self.settings["mysql_password"] = password.content
        dataIO.save_json(SAVE_FOLDER + 'settings.json', self.settings)
        await self.bot.say("Settings saved.")
        LOGGER.info("Database connection changed by %s#%s (%s)",
                    ctx.message.author.name,
                    ctx.message.author.discriminator,
                    ctx.message.author.id)

    ####################
    # HELPER FUNCTIONS #
    ####################
    def addPoints(self, guildID, userID):
        """Add rank points between 0 and MAX_POINTS to the user"""
        try:
            pointsToAdd = random.randint(0, self.settings[guildID]["maxPoints"])
        except KeyError:
            # Most likely key error, use default 25.
            pointsToAdd = random.randint(0, 25)
        database = MySQLdb.connect(host=self.settings["mysql_host"],
                                   user=self.settings["mysql_username"],
                                   passwd=self.settings["mysql_password"])
        cursor = database.cursor()
        fetch = cursor.execute("SELECT xp from renbot.xp WHERE userid = {0} and "
                               "guildid = {1}".format(userID, guildID))
        currentXP = 0
        if fetch != 0:  # This user has past XP that we can add to.
            result = cursor.fetchall()
            currentXP = result[0][0] + pointsToAdd
        else:  # New user
            currentXP = pointsToAdd
        cursor.execute("REPLACE INTO renbot.xp (userid, guildid, xp) VALUES ({0}, "
                       "{1}, {2})".format(userID, guildID, currentXP))
        database.commit()
        cursor.close()
        database.close()

    async def checkFlood(self, message):
        """Check to see if the user is sending messages that are flooding the server.
        If yes, then do not add points.
        """
        # Decide whether to store last spoken user data in:
        # - MySQL
        # - JSON
        # - or leave in RAM.
        # Check as follows:
        # - Get the user ID and message time
        # - Check the last message time that was used to add points to the current
        #   user.
        # - If this time does not exceed COOLDOWN, return and do nothing.
        # - If this time exceeds COOLDOWN, update the last spoken time of this user
        #   with the message time.
        # - Add points between 0 and MAX_POINTS (use random).
        # - Return.
        timestamp = message.timestamp.timestamp()
        if message.author.bot:
            return
        if message.channel.is_private:
            return
        sid = message.server.id
        uid = message.author.id
        try:
            # If the time does not exceed COOLDOWN, return and do nothing.
            if timestamp - self.lastspoke[sid][uid]["timestamp"] <= self.settings[sid]["cooldown"]:
                return
            # Update last spoke time with new message time.
        except KeyError:
            # Most likely key error, so create the key, then update
            # last spoke time with new message time.
            try:
                self.lastspoke[sid][uid] = {}
            except KeyError:
                self.lastspoke[sid] = {}
                self.lastspoke[sid][uid] = {}
            LOGGER.error("%s#%s (%s) has not spoken since last restart, adding new "
                         "timestamp",
                         message.author.name,
                         message.author.discriminator,
                         uid)
        self.lastspoke[sid][uid]["timestamp"] = timestamp
        self.addPoints(message.server.id, message.author.id)
def setup(bot):
    """Add the cog to the bot"""
    global LOGGER  # pylint: disable=global-statement
    checkFolder()  # Make sure the data folder exists!
    checkFiles()   # Make sure we have a local database!
    LOGGER = logging.getLogger("red.Ranks")
    if LOGGER.level == 0:
        # Prevents the LOGGER from being loaded again in case of module reload.
        LOGGER.setLevel(logging.INFO)
        handler = logging.FileHandler(filename=SAVE_FOLDER + "info.log",
                                      encoding="utf-8",
                                      mode="a")
        formatter = logging.Formatter("%(asctime)s %(message)s",
                                      datefmt="[%d/%m/%Y %H:%M:%S]")
        handler.setFormatter(formatter)
        LOGGER.addHandler(handler)
    cog = Ranks(bot)
    bot.add_cog(cog)
    # XP is awarded from the on_message listener, not a command.
    bot.add_listener(cog.checkFlood, 'on_message')
| Injabie3/lui-cogs | ranks/ranks.py | Python | gpl-3.0 | 17,074 | [
"MOE"
] | 97a8127fed4c25ad0fcc0a65524eb0292c446f35ac0940c74863be9f3d920beb |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains objects that are used to describe the environments in a structure. The most detailed object
(StructureEnvironments) contains a very thorough analysis of the environments of a given atom but is difficult to
used as such. The LightStructureEnvironments object is a lighter version that is obtained by applying a "strategy"
on the StructureEnvironments object. Basically, the LightStructureEnvironments provides the coordination environment(s)
and possibly some fraction corresponding to these.
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
import numpy as np
from monty.json import MontyDecoder, MSONable, jsanitize
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import (
AllCoordinationGeometries,
)
from pymatgen.analysis.chemenv.coordination_environments.voronoi import (
DetailedVoronoiContainer,
)
from pymatgen.analysis.chemenv.utils.chemenv_errors import ChemenvError
from pymatgen.analysis.chemenv.utils.defs_utils import AdditionalConditions
from pymatgen.core.periodic_table import Element, Species
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
# Module-level registry of all known coordination geometries, and the mapping
# from geometry symbol to coordination number derived from it.
allcg = AllCoordinationGeometries()
symbol_cn_mapping = allcg.get_symbol_cn_mapping()
class StructureEnvironments(MSONable):
"""
Class used to store the chemical environments of a given structure.
"""
AC = AdditionalConditions()
class NeighborsSet:
"""
Class used to store a given set of neighbors of a given site (based on the detailed_voronoi).
"""
def __init__(self, structure, isite, detailed_voronoi, site_voronoi_indices, sources=None):
"""
Constructor for NeighborsSet.
Args:
structure: Structure object.
isite: Index of the site for which neighbors are stored in this NeighborsSet.
detailed_voronoi: Corresponding DetailedVoronoiContainer object containing all the possible
neighbors of the give site.
site_voronoi_indices: Indices of the voronoi sites in the DetailedVoronoiContainer object that
make up this NeighborsSet.
sources: Sources for this NeighborsSet, i.e. how this NeighborsSet was generated.
"""
self.structure = structure
self.isite = isite
self.detailed_voronoi = detailed_voronoi
self.voronoi = detailed_voronoi.voronoi_list2[isite]
myset = set(site_voronoi_indices)
if len(myset) != len(site_voronoi_indices):
raise ValueError("Set of neighbors contains duplicates !")
self.site_voronoi_indices = sorted(myset)
if sources is None:
self.sources = [{"origin": "UNKNOWN"}]
elif isinstance(sources, list):
self.sources = sources
else:
self.sources = [sources]
def get_neighb_voronoi_indices(self, permutation):
"""
Return the indices in the detailed_voronoi corresponding to the current permutation.
Args:
permutation: Current permutation for which the indices in the detailed_voronoi are needed.
Returns: List of indices in the detailed_voronoi.
"""
return [self.site_voronoi_indices[ii] for ii in permutation]
@property
def neighb_coords(self):
"""
Coordinates of neighbors for this NeighborsSet.
"""
return [self.voronoi[inb]["site"].coords for inb in self.site_voronoi_indices]
@property
def neighb_coordsOpt(self):
"""
Optimized access to the coordinates of neighbors for this NeighborsSet.
"""
return self.detailed_voronoi.voronoi_list_coords[self.isite].take(self.site_voronoi_indices, axis=0)
@property
def neighb_sites(self):
"""
Neighbors for this NeighborsSet as pymatgen Sites.
"""
return [self.voronoi[inb]["site"] for inb in self.site_voronoi_indices]
@property
def neighb_sites_and_indices(self):
"""
List of neighbors for this NeighborsSet as pymatgen Sites and their index in the original structure.
"""
return [
{"site": self.voronoi[inb]["site"], "index": self.voronoi[inb]["index"]}
for inb in self.site_voronoi_indices
]
@property
def coords(self):
"""
Coordinates of the current central atom and its neighbors for this NeighborsSet.
"""
coords = [self.structure[self.isite].coords]
coords.extend(self.neighb_coords)
return coords
@property
def normalized_distances(self):
"""
Normalized distances to each neighbor in this NeighborsSet.
"""
return [self.voronoi[inb]["normalized_distance"] for inb in self.site_voronoi_indices]
@property
def normalized_angles(self):
"""
Normalized angles for each neighbor in this NeighborsSet.
"""
return [self.voronoi[inb]["normalized_angle"] for inb in self.site_voronoi_indices]
@property
def distances(self):
"""
Distances to each neighbor in this NeighborsSet.
"""
return [self.voronoi[inb]["distance"] for inb in self.site_voronoi_indices]
@property
def angles(self):
"""
Angles for each neighbor in this NeighborsSet.
"""
return [self.voronoi[inb]["angle"] for inb in self.site_voronoi_indices]
# @property
# def sphere_fraction_angles(self):
# return [0.25 * self.voronoi[inb]['angle'] / np.pi for inb in self.site_voronoi_indices]
@property
def info(self):
"""
Summarized information about this NeighborsSet.
"""
was = self.normalized_angles
wds = self.normalized_distances
angles = self.angles
distances = self.distances
return {
"normalized_angles": was,
"normalized_distances": wds,
"normalized_angles_sum": np.sum(was),
"normalized_angles_mean": np.mean(was),
"normalized_angles_std": np.std(was),
"normalized_angles_min": np.min(was),
"normalized_angles_max": np.max(was),
"normalized_distances_mean": np.mean(wds),
"normalized_distances_std": np.std(wds),
"normalized_distances_min": np.min(wds),
"normalized_distances_max": np.max(wds),
"angles": angles,
"distances": distances,
"angles_sum": np.sum(angles),
"angles_mean": np.mean(angles),
"angles_std": np.std(angles),
"angles_min": np.min(angles),
"angles_max": np.max(angles),
"distances_mean": np.mean(distances),
"distances_std": np.std(distances),
"distances_min": np.min(distances),
"distances_max": np.max(distances),
}
def distance_plateau(self):
"""
Returns the distances plateau's for this NeighborsSet.
"""
all_nbs_normalized_distances_sorted = sorted(
(nb["normalized_distance"] for nb in self.voronoi), reverse=True
)
maxdist = np.max(self.normalized_distances)
plateau = None
for idist, dist in enumerate(all_nbs_normalized_distances_sorted):
if np.isclose(
dist,
maxdist,
rtol=0.0,
atol=self.detailed_voronoi.normalized_distance_tolerance,
):
if idist == 0:
plateau = np.inf
else:
plateau = all_nbs_normalized_distances_sorted[idist - 1] - maxdist
break
if plateau is None:
raise ValueError("Plateau not found ...")
return plateau
def angle_plateau(self):
"""
Returns the angles plateau's for this NeighborsSet.
"""
all_nbs_normalized_angles_sorted = sorted(nb["normalized_angle"] for nb in self.voronoi)
minang = np.min(self.normalized_angles)
# print('minang', minang)
# print('all_nbs_normalized_angles_sorted', all_nbs_normalized_angles_sorted)
for nb in self.voronoi:
print(nb)
plateau = None
for iang, ang in enumerate(all_nbs_normalized_angles_sorted):
if np.isclose(
ang,
minang,
rtol=0.0,
atol=self.detailed_voronoi.normalized_angle_tolerance,
):
if iang == 0:
plateau = minang
else:
plateau = minang - all_nbs_normalized_angles_sorted[iang - 1]
break
if plateau is None:
raise ValueError("Plateau not found ...")
return plateau
        def voronoi_grid_surface_points(self, additional_condition=1, other_origins="DO_NOTHING"):
            """
            Get the surface points in the Voronoi grid for this neighbor from the sources.
            The general shape of the points should look like a staircase such as in the following figure :
               ^
            0.0|
               |
               |      B----C
               |      |    |
               |      |    |
            a  |      k    D-------E
            n  |      |            |
            g  |      |            |
            l  |      |            |
            e  |      j       F----n---------G
               |      |       |              |
               |      |       |              |
               |      A----g-------h----i---------H
               |
               |
            1.0+------------------------------------------------->
              1.0              distance              2.0   ->+Inf
            Args:
                additional_condition: Additional condition for the neighbors.
                other_origins: What to do with sources that do not come from the Voronoi grid (e.g. "from hints").
            Returns:
                List of (distance, angle) points delimiting the grid surface of this
                neighbors set, or None when no matching source exists.
            """
            # Keep only the voronoi-grid sources matching the requested additional condition.
            mysrc = []
            for src in self.sources:
                if src["origin"] == "dist_ang_ac_voronoi":
                    if src["ac"] != additional_condition:
                        continue
                    mysrc.append(src)
                else:
                    if other_origins == "DO_NOTHING":
                        continue
                    raise NotImplementedError("Nothing implemented for other sources ...")
            if len(mysrc) == 0:
                return None
            dists = [src["dp_dict"]["min"] for src in mysrc]
            angs = [src["ap_dict"]["max"] for src in mysrc]
            next_dists = [src["dp_dict"]["next"] for src in mysrc]
            next_angs = [src["ap_dict"]["next"] for src in mysrc]
            # Collect the distinct distance/angle grid values (within float tolerance)
            # and count, for each grid corner, how many source rectangles touch it.
            points_dict = {}
            pdists = []
            pangs = []
            for isrc in range(len(mysrc)):
                if not any(np.isclose(pdists, dists[isrc])):
                    pdists.append(dists[isrc])
                if not any(np.isclose(pdists, next_dists[isrc])):
                    pdists.append(next_dists[isrc])
                if not any(np.isclose(pangs, angs[isrc])):
                    pangs.append(angs[isrc])
                if not any(np.isclose(pangs, next_angs[isrc])):
                    pangs.append(next_angs[isrc])
                d1_indices = np.argwhere(np.isclose(pdists, dists[isrc])).flatten()
                if len(d1_indices) != 1:
                    raise ValueError("Distance parameter not found ...")
                d2_indices = np.argwhere(np.isclose(pdists, next_dists[isrc])).flatten()
                if len(d2_indices) != 1:
                    raise ValueError("Distance parameter not found ...")
                a1_indices = np.argwhere(np.isclose(pangs, angs[isrc])).flatten()
                if len(a1_indices) != 1:
                    raise ValueError("Angle parameter not found ...")
                a2_indices = np.argwhere(np.isclose(pangs, next_angs[isrc])).flatten()
                if len(a2_indices) != 1:
                    raise ValueError("Angle parameter not found ...")
                id1 = d1_indices[0]
                id2 = d2_indices[0]
                ia1 = a1_indices[0]
                ia2 = a2_indices[0]
                for id_ia in [(id1, ia1), (id1, ia2), (id2, ia1), (id2, ia2)]:
                    if id_ia not in points_dict:
                        points_dict[id_ia] = 0
                    points_dict[id_ia] += 1
            # Corners touched an odd number of times lie on the outer boundary.
            new_pts = []
            for pt, pt_nb in points_dict.items():
                if pt_nb % 2 == 1:
                    new_pts.append(pt)
            # Walk the boundary corners, alternating moves along the angle and
            # distance axes, producing the staircase ordering shown above.
            sorted_points = [(0, 0)]
            move_ap_index = True
            while True:
                last_pt = sorted_points[-1]
                if move_ap_index:  # "Move" the angle parameter
                    idp = last_pt[0]
                    iap = None
                    for pt in new_pts:
                        if pt[0] == idp and pt != last_pt:
                            iap = pt[1]
                            break
                else:  # "Move" the distance parameter
                    idp = None
                    iap = last_pt[1]
                    for pt in new_pts:
                        if pt[1] == iap and pt != last_pt:
                            idp = pt[0]
                            break
                if (idp, iap) == (0, 0):
                    break
                if (idp, iap) in sorted_points:
                    raise ValueError("Error sorting points ...")
                sorted_points.append((idp, iap))
                move_ap_index = not move_ap_index
            points = [(pdists[idp], pangs[iap]) for (idp, iap) in sorted_points]
            return points
@property
def source(self):
"""
Returns the source of this NeighborsSet (how it was generated, e.g. from which Voronoi cut-offs, or from
hints).
"""
if len(self.sources) != 1:
raise RuntimeError("Number of sources different from 1 !")
return self.sources[0]
def add_source(self, source):
"""
Add a source to this NeighborsSet.
Args:
source: Information about the generation of this NeighborsSet.
"""
if source not in self.sources:
self.sources.append(source)
        def __len__(self):
            # The "length" of a neighbors set is its coordination number.
            return len(self.site_voronoi_indices)
        def __hash__(self):
            # NOTE(review): hashing on the coordination number only is weak (many
            # unequal sets share a hash) but is consistent with __eq__, since
            # equal sets necessarily have the same number of voronoi indices.
            return len(self.site_voronoi_indices)
        def __eq__(self, other):
            # Two neighbors sets are equal when they belong to the same site and
            # select exactly the same voronoi indices (sources are ignored).
            return self.isite == other.isite and self.site_voronoi_indices == other.site_voronoi_indices
        def __ne__(self, other):
            # Kept explicitly for completeness; delegates to __eq__.
            return not self == other
def __str__(self):
out = f"Neighbors Set for site #{self.isite:d} :\n"
out += f" - Coordination number : {len(self):d}\n"
out += " - Voronoi indices : {}\n".format(
", ".join([f"{site_voronoi_index:d}" for site_voronoi_index in self.site_voronoi_indices])
)
return out
def as_dict(self):
"""
A JSON serializable dict representation of the NeighborsSet.
"""
return {
"isite": self.isite,
"site_voronoi_indices": self.site_voronoi_indices,
"sources": self.sources,
}
@classmethod
def from_dict(cls, dd, structure, detailed_voronoi):
"""
Reconstructs the NeighborsSet algorithm from its JSON serializable dict representation, together with
the structure and the DetailedVoronoiContainer.
As an inner (nested) class, the NeighborsSet is not supposed to be used anywhere else that inside the
StructureEnvironments. The from_dict method is thus using the structure and detailed_voronoi when
reconstructing itself. These two are both in the StructureEnvironments object.
Args:
dd: a JSON serializable dict representation of a NeighborsSet.
structure: The structure.
detailed_voronoi: The Voronoi object containing all the neighboring atoms from which the subset of
neighbors for this NeighborsSet is extracted.
Returns: a NeighborsSet.
"""
return cls(
structure=structure,
isite=dd["isite"],
detailed_voronoi=detailed_voronoi,
site_voronoi_indices=dd["site_voronoi_indices"],
sources=dd["sources"],
)
def __init__(
self,
voronoi,
valences,
sites_map,
equivalent_sites,
ce_list,
structure,
neighbors_sets=None,
info=None,
):
"""
Constructor for the StructureEnvironments object.
Args:
voronoi: VoronoiContainer object for the structure.
valences: Valences provided.
sites_map: Mapping of equivalent sites to the unequivalent sites that have been computed.
equivalent_sites: List of list of equivalent sites of the structure.
ce_list: List of chemical environments.
structure: Structure object.
neighbors_sets: List of neighbors sets.
info: Additional information for this StructureEnvironments object.
"""
self.voronoi = voronoi
self.valences = valences
self.sites_map = sites_map
self.equivalent_sites = equivalent_sites
# self.struct_sites_to_irreducible_site_list_map = struct_sites_to_irreducible_site_list_map
self.ce_list = ce_list
self.structure = structure
if neighbors_sets is None:
self.neighbors_sets = [None] * len(self.structure)
else:
self.neighbors_sets = neighbors_sets
self.info = info
def init_neighbors_sets(self, isite, additional_conditions=None, valences=None):
"""
Initialize the list of neighbors sets for the current site.
Args:
isite: Index of the site under consideration.
additional_conditions: Additional conditions to be used for the initialization of the list of
neighbors sets, e.g. "Only anion-cation bonds", ...
valences: List of valences for each site in the structure (needed if an additional condition based on the
valence is used, e.g. only anion-cation bonds).
"""
site_voronoi = self.voronoi.voronoi_list2[isite]
if site_voronoi is None:
return
if additional_conditions is None:
additional_conditions = self.AC.ALL
if (self.AC.ONLY_ACB in additional_conditions or self.AC.ONLY_ACB_AND_NO_E2SEB) and valences is None:
raise ChemenvError(
"StructureEnvironments",
"init_neighbors_sets",
"Valences are not given while only_anion_cation_bonds are allowed. Cannot continue",
)
site_distance_parameters = self.voronoi.neighbors_normalized_distances[isite]
site_angle_parameters = self.voronoi.neighbors_normalized_angles[isite]
# Precompute distance conditions
distance_conditions = []
for idp, dp_dict in enumerate(site_distance_parameters):
distance_conditions.append([])
for inb, voro_nb_dict in enumerate(site_voronoi):
cond = inb in dp_dict["nb_indices"]
distance_conditions[idp].append(cond)
# Precompute angle conditions
angle_conditions = []
for iap, ap_dict in enumerate(site_angle_parameters):
angle_conditions.append([])
for inb, voro_nb_dict in enumerate(site_voronoi):
cond = inb in ap_dict["nb_indices"]
angle_conditions[iap].append(cond)
# Precompute additional conditions
precomputed_additional_conditions = {ac: [] for ac in additional_conditions}
for inb, voro_nb_dict in enumerate(site_voronoi):
for ac in additional_conditions:
cond = self.AC.check_condition(
condition=ac,
structure=self.structure,
parameters={
"valences": valences,
"neighbor_index": voro_nb_dict["index"],
"site_index": isite,
},
)
precomputed_additional_conditions[ac].append(cond)
# Add the neighbors sets based on the distance/angle/additional parameters
for idp, dp_dict in enumerate(site_distance_parameters):
for iap, ap_dict in enumerate(site_angle_parameters):
for iac, ac in enumerate(additional_conditions):
src = {
"origin": "dist_ang_ac_voronoi",
"idp": idp,
"iap": iap,
"dp_dict": dp_dict,
"ap_dict": ap_dict,
"iac": iac,
"ac": ac,
"ac_name": self.AC.CONDITION_DESCRIPTION[ac],
}
site_voronoi_indices = [
inb
for inb, voro_nb_dict in enumerate(site_voronoi)
if (
distance_conditions[idp][inb]
and angle_conditions[iap][inb]
and precomputed_additional_conditions[ac][inb]
)
]
nb_set = self.NeighborsSet(
structure=self.structure,
isite=isite,
detailed_voronoi=self.voronoi,
site_voronoi_indices=site_voronoi_indices,
sources=src,
)
self.add_neighbors_set(isite=isite, nb_set=nb_set)
def add_neighbors_set(self, isite, nb_set):
"""
Adds a neighbor set to the list of neighbors sets for this site.
Args:
isite: Index of the site under consideration.
nb_set: NeighborsSet to be added.
"""
if self.neighbors_sets[isite] is None:
self.neighbors_sets[isite] = {}
self.ce_list[isite] = {}
cn = len(nb_set)
if cn not in self.neighbors_sets[isite]:
self.neighbors_sets[isite][cn] = []
self.ce_list[isite][cn] = []
try:
nb_set_index = self.neighbors_sets[isite][cn].index(nb_set)
self.neighbors_sets[isite][cn][nb_set_index].add_source(nb_set.source)
except ValueError:
self.neighbors_sets[isite][cn].append(nb_set)
self.ce_list[isite][cn].append(None)
def update_coordination_environments(self, isite, cn, nb_set, ce):
"""
Updates the coordination environment for this site, coordination and neighbor set.
Args:
isite: Index of the site to be updated.
cn: Coordination to be updated.
nb_set: Neighbors set to be updated.
ce: ChemicalEnvironments object for this neighbors set.
"""
if self.ce_list[isite] is None:
self.ce_list[isite] = {}
if cn not in self.ce_list[isite]:
self.ce_list[isite][cn] = []
try:
nb_set_index = self.neighbors_sets[isite][cn].index(nb_set)
except ValueError:
raise ValueError("Neighbors set not found in the structure environments")
if nb_set_index == len(self.ce_list[isite][cn]):
self.ce_list[isite][cn].append(ce)
elif nb_set_index < len(self.ce_list[isite][cn]):
self.ce_list[isite][cn][nb_set_index] = ce
else:
raise ValueError("Neighbors set not yet in ce_list !")
def update_site_info(self, isite, info_dict):
"""
Update information about this site.
Args:
isite: Index of the site for which info has to be updated.
info_dict: Dictionary of information to be added for this site.
"""
if "sites_info" not in self.info:
self.info["sites_info"] = [{} for _ in range(len(self.structure))]
self.info["sites_info"][isite].update(info_dict)
def get_coordination_environments(self, isite, cn, nb_set):
"""
Get the ChemicalEnvironments for a given site, coordination and neighbors set.
Args:
isite: Index of the site for which the ChemicalEnvironments is looked for.
cn: Coordination for which the ChemicalEnvironments is looked for.
nb_set: Neighbors set for which the ChemicalEnvironments is looked for.
Returns: a ChemicalEnvironments object.
"""
if self.ce_list[isite] is None:
return None
if cn not in self.ce_list[isite]:
return None
try:
nb_set_index = self.neighbors_sets[isite][cn].index(nb_set)
except ValueError:
return None
return self.ce_list[isite][cn][nb_set_index]
def get_csm(self, isite, mp_symbol):
"""
Get the continuous symmetry measure for a given site in the given coordination environment.
Args:
isite: Index of the site.
mp_symbol: Symbol of the coordination environment for which we want the continuous symmetry measure.
Returns: Continuous symmetry measure of the given site in the given environment.
"""
csms = self.get_csms(isite, mp_symbol)
if len(csms) != 1:
raise ChemenvError(
"StructureEnvironments",
"get_csm",
f'Number of csms for site #{str(isite)} with mp_symbol "{mp_symbol}" = {str(len(csms))}',
)
return csms[0]
def get_csms(self, isite, mp_symbol):
"""
Returns the continuous symmetry measure(s) of site with index isite with respect to the
perfect coordination environment with mp_symbol. For some environments, a given mp_symbol might not
be available (if there is no voronoi parameters leading to a number of neighbours corresponding to
the coordination number of environment mp_symbol). For some environments, a given mp_symbol might
lead to more than one csm (when two or more different voronoi parameters lead to different neighbours
but with same number of neighbours).
Args:
isite: Index of the site.
mp_symbol: MP symbol of the perfect environment for which the csm has to be given.
Returns:
List of csms for site isite with respect to geometry mp_symbol
"""
cn = symbol_cn_mapping[mp_symbol]
if cn not in self.ce_list[isite]:
return []
return [envs[mp_symbol] for envs in self.ce_list[isite][cn]]
def plot_csm_and_maps(self, isite, max_csm=8.0):
"""
Plotting of the coordination numbers of a given site for all the distfactor/angfactor parameters. If the
chemical environments are given, a color map is added to the plot, with the lowest continuous symmetry measure
as the value for the color of that distfactor/angfactor set.
Args:
isite: Index of the site for which the plot has to be done
max_csm: Maximum continuous symmetry measure to be shown.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
print('Plotting Chemical Environments requires matplotlib ... exiting "plot" function')
return None
fig = self.get_csm_and_maps(isite=isite, max_csm=max_csm)
if fig is None:
return None
plt.show()
return None
    def get_csm_and_maps(self, isite, max_csm=8.0, figsize=None, symmetry_measure_type=None):
        """
        Plotting of the coordination numbers of a given site for all the distfactor/angfactor parameters. If the
        chemical environments are given, a color map is added to the plot, with the lowest continuous symmetry measure
        as the value for the color of that distfactor/angfactor set.
        Args:
            isite: Index of the site for which the plot has to be done.
            max_csm: Maximum continuous symmetry measure to be shown.
            figsize: Size of the figure.
            symmetry_measure_type: Type of continuous symmetry measure to be used.
        Returns:
            Matplotlib figure and axes representing the csm and maps, or None when
            matplotlib is not available.
        """
        try:
            import matplotlib.pyplot as plt
            from matplotlib.gridspec import GridSpec
        except ImportError:
            print('Plotting Chemical Environments requires matplotlib ... exiting "plot" function')
            return None
        if symmetry_measure_type is None:
            symmetry_measure_type = "csm_wcs_ctwcc"
        # Initializes the figure
        if figsize is None:
            fig = plt.figure()
        else:
            fig = plt.figure(figsize=figsize)
        gs = GridSpec(2, 1, hspace=0.0, wspace=0.0)
        subplot = fig.add_subplot(gs[:])
        subplot_distang = subplot.twinx()
        # One x slot (ix) per neighbors set having at least one geometry below max_csm.
        ix = 0
        cn_maps = []
        all_wds = []
        all_was = []
        max_wd = 0.0
        for cn, nb_sets in self.neighbors_sets[isite].items():
            for inb_set, nb_set in enumerate(nb_sets):
                ce = self.ce_list[isite][cn][inb_set]
                if ce is None:
                    continue
                mingeoms = ce.minimum_geometries(max_csm=max_csm)
                if len(mingeoms) == 0:
                    continue
                wds = nb_set.normalized_distances
                max_wd = max(max_wd, max(wds))
                all_wds.append(wds)
                all_was.append(nb_set.normalized_angles)
                for mp_symbol, cg_dict in mingeoms:
                    csm = cg_dict["other_symmetry_measures"][symmetry_measure_type]
                    subplot.plot(ix, csm, "ob")
                    subplot.annotate(mp_symbol, xy=(ix, csm))
                cn_maps.append((cn, inb_set))
                ix += 1
        # Choose a "nice" upper bound and tick spacing for the distance axis
        # depending on the largest normalized distance encountered.
        if max_wd < 1.225:
            ymax_wd = 1.25
            yticks_wd = np.linspace(1.0, ymax_wd, 6)
        elif max_wd < 1.36:
            ymax_wd = 1.4
            yticks_wd = np.linspace(1.0, ymax_wd, 5)
        elif max_wd < 1.45:
            ymax_wd = 1.5
            yticks_wd = np.linspace(1.0, ymax_wd, 6)
        elif max_wd < 1.55:
            ymax_wd = 1.6
            yticks_wd = np.linspace(1.0, ymax_wd, 7)
        elif max_wd < 1.75:
            ymax_wd = 1.8
            yticks_wd = np.linspace(1.0, ymax_wd, 5)
        elif max_wd < 1.95:
            ymax_wd = 2.0
            yticks_wd = np.linspace(1.0, ymax_wd, 6)
        elif max_wd < 2.35:
            ymax_wd = 2.5
            yticks_wd = np.linspace(1.0, ymax_wd, 7)
        else:
            ymax_wd = np.ceil(1.1 * max_wd)
            yticks_wd = np.linspace(1.0, ymax_wd, 6)
        yticks_wa = np.linspace(0.0, 1.0, 6)
        # The right-hand axis is split in two stacked bands: angles in the lower
        # band [yamin, yamax], distances in the upper band [ydmin, ydmax].
        frac_bottom = 0.05
        frac_top = 0.05
        frac_middle = 0.1
        yamin = frac_bottom
        yamax = 0.5 - frac_middle / 2
        ydmin = 0.5 + frac_middle / 2
        ydmax = 1.0 - frac_top
        def yang(wa):
            # Map a normalized angle in [0, 1] into the lower band.
            return (yamax - yamin) * np.array(wa) + yamin
        def ydist(wd):
            # Map a normalized distance in [1, ymax_wd] into the upper band.
            return (np.array(wd) - 1.0) / (ymax_wd - 1.0) * (ydmax - ydmin) + ydmin
        for ix, was in enumerate(all_was):
            subplot_distang.plot(0.2 + ix * np.ones_like(was), yang(was), "<g")
            if np.mod(ix, 2) == 0:
                alpha = 0.3
            else:
                alpha = 0.1
            # Alternate shaded background stripes to visually separate the slots.
            subplot_distang.fill_between(
                [-0.5 + ix, 0.5 + ix],
                [1.0, 1.0],
                0.0,
                facecolor="k",
                alpha=alpha,
                zorder=-1000,
            )
        for ix, wds in enumerate(all_wds):
            subplot_distang.plot(0.2 + ix * np.ones_like(wds), ydist(wds), "sm")
        subplot_distang.plot([-0.5, len(cn_maps)], [0.5, 0.5], "k--", alpha=0.5)
        yticks = yang(yticks_wa).tolist()
        yticks.extend(ydist(yticks_wd).tolist())
        yticklabels = yticks_wa.tolist()
        yticklabels.extend(yticks_wd.tolist())
        subplot_distang.set_yticks(yticks)
        subplot_distang.set_yticklabels(yticklabels)
        # Frameless helper subplots used only to carry the two right-hand labels.
        fake_subplot_ang = fig.add_subplot(gs[1], frame_on=False)
        fake_subplot_dist = fig.add_subplot(gs[0], frame_on=False)
        fake_subplot_ang.set_yticks([])
        fake_subplot_dist.set_yticks([])
        fake_subplot_ang.set_xticks([])
        fake_subplot_dist.set_xticks([])
        fake_subplot_ang.set_ylabel("Angle parameter", labelpad=45, rotation=-90)
        fake_subplot_dist.set_ylabel("Distance parameter", labelpad=45, rotation=-90)
        fake_subplot_ang.yaxis.set_label_position("right")
        fake_subplot_dist.yaxis.set_label_position("right")
        subplot_distang.set_ylim([0.0, 1.0])
        subplot.set_xticks(range(len(cn_maps)))
        subplot.set_ylabel("Continuous symmetry measure")
        subplot.set_xlim([-0.5, len(cn_maps) - 0.5])
        subplot_distang.set_xlim([-0.5, len(cn_maps) - 0.5])
        subplot.set_xticklabels([str(cn_map) for cn_map in cn_maps])
        return fig, subplot
    def get_environments_figure(
        self,
        isite,
        plot_type=None,
        title="Coordination numbers",
        max_dist=2.0,
        colormap=None,
        figsize=None,
        strategy=None,
    ):
        """
        Plotting of the coordination environments of a given site for all the distfactor/angfactor regions. The
        chemical environments with the lowest continuous symmetry measure is shown for each distfactor/angfactor
        region as the value for the color of that distfactor/angfactor region (using a colormap).
        Args:
            isite: Index of the site for which the plot has to be done.
            plot_type: How to plot the coordinations.
            title: Title for the figure.
            max_dist: Maximum distance to be plotted when the plotting of the distance is set to 'initial_normalized'
                or 'initial_real' (Warning: this is not the same meaning in both cases! In the first case, the
                closest atom lies at a "normalized" distance of 1.0 so that 2.0 means refers to this normalized
                distance while in the second case, the real distance is used).
            colormap: Color map to be used for the continuous symmetry measure.
            figsize: Size of the figure.
            strategy: Whether to plot information about one of the Chemenv Strategies.
        Returns:
            Matplotlib figure and axes representing the environments, or None when
            matplotlib is not available.
        """
        try:
            import matplotlib.pyplot as mpl
            from matplotlib import cm
            from matplotlib.colors import Normalize
            from matplotlib.patches import Polygon
        except ImportError:
            print('Plotting Chemical Environments requires matplotlib ... exiting "plot" function')
            return None
        # Initializes the figure
        if figsize is None:
            fig = mpl.figure()
        else:
            fig = mpl.figure(figsize=figsize)
        subplot = fig.add_subplot(111)
        # Initializes the distance and angle parameters
        if plot_type is None:
            plot_type = {
                "distance_parameter": ("initial_normalized", None),
                "angle_parameter": ("initial_normalized_inverted", None),
            }
        if colormap is None:
            mycm = cm.jet  # pylint: disable=E1101
        else:
            mycm = colormap
        # Fixed csm color scale from 0 to 10.
        mymin = 0.0
        mymax = 10.0
        norm = Normalize(vmin=mymin, vmax=mymax)
        scalarmap = cm.ScalarMappable(norm=norm, cmap=mycm)
        dist_limits = [1.0, max_dist]
        ang_limits = [0.0, 1.0]
        # dp_func/ap_func map raw distance/angle parameters to plot coordinates.
        if plot_type["distance_parameter"][0] == "one_minus_inverse_alpha_power_n":
            if plot_type["distance_parameter"][1] is None:
                exponent = 3
            else:
                exponent = plot_type["distance_parameter"][1]["exponent"]
            xlabel = f"Distance parameter : $1.0-\\frac{{1.0}}{{\\alpha^{{{exponent:d}}}}}$"
            def dp_func(dp):
                return 1.0 - 1.0 / np.power(dp, exponent)
        elif plot_type["distance_parameter"][0] == "initial_normalized":
            xlabel = "Distance parameter : $\\alpha$"
            def dp_func(dp):
                return dp
        else:
            raise ValueError(f"Wrong value for distance parameter plot type \"{plot_type['distance_parameter'][0]}\"")
        if plot_type["angle_parameter"][0] == "one_minus_gamma":
            ylabel = "Angle parameter : $1.0-\\gamma$"
            def ap_func(ap):
                return 1.0 - ap
        elif plot_type["angle_parameter"][0] in [
            "initial_normalized_inverted",
            "initial_normalized",
        ]:
            ylabel = "Angle parameter : $\\gamma$"
            def ap_func(ap):
                return ap
        else:
            raise ValueError(f"Wrong value for angle parameter plot type \"{plot_type['angle_parameter'][0]}\"")
        dist_limits = [dp_func(dp) for dp in dist_limits]
        ang_limits = [ap_func(ap) for ap in ang_limits]
        # One polygon per neighbors set, colored by its best (lowest) csm.
        for cn, cn_nb_sets in self.neighbors_sets[isite].items():
            for inb_set, nb_set in enumerate(cn_nb_sets):
                nb_set_surface_pts = nb_set.voronoi_grid_surface_points()
                if nb_set_surface_pts is None:
                    continue
                ce = self.ce_list[isite][cn][inb_set]
                if ce is None:
                    # No environment computed: white patch labeled by coordination number.
                    mycolor = "w"
                    myinvcolor = "k"
                    mytext = f"{cn:d}"
                else:
                    mingeom = ce.minimum_geometry()
                    if mingeom is not None:
                        mp_symbol = mingeom[0]
                        csm = mingeom[1]["symmetry_measure"]
                        mycolor = scalarmap.to_rgba(csm)
                        # Use the inverted color for the text so it stays readable.
                        myinvcolor = [
                            1.0 - mycolor[0],
                            1.0 - mycolor[1],
                            1.0 - mycolor[2],
                            1.0,
                        ]
                        mytext = f"{mp_symbol}"
                    else:
                        mycolor = "w"
                        myinvcolor = "k"
                        mytext = f"{cn:d}"
                nb_set_surface_pts = [(dp_func(pt[0]), ap_func(pt[1])) for pt in nb_set_surface_pts]
                polygon = Polygon(
                    nb_set_surface_pts,
                    closed=True,
                    edgecolor="k",
                    facecolor=mycolor,
                    linewidth=1.2,
                )
                subplot.add_patch(polygon)
                myipt = len(nb_set_surface_pts) / 2
                ipt = int(myipt)
                if myipt != ipt:
                    raise RuntimeError("Number of surface points not even")
                patch_center = (
                    (nb_set_surface_pts[0][0] + min(nb_set_surface_pts[ipt][0], dist_limits[1])) / 2,
                    (nb_set_surface_pts[0][1] + nb_set_surface_pts[ipt][1]) / 2,
                )
                # Only annotate patches that are large enough for the text to fit.
                if (
                    np.abs(nb_set_surface_pts[-1][1] - nb_set_surface_pts[-2][1]) > 0.06
                    and np.abs(min(nb_set_surface_pts[-1][0], dist_limits[1]) - nb_set_surface_pts[0][0]) > 0.125
                ):
                    xytext = (
                        (min(nb_set_surface_pts[-1][0], dist_limits[1]) + nb_set_surface_pts[0][0]) / 2,
                        (nb_set_surface_pts[-1][1] + nb_set_surface_pts[-2][1]) / 2,
                    )
                    subplot.annotate(
                        mytext,
                        xy=xytext,
                        ha="center",
                        va="center",
                        color=myinvcolor,
                        fontsize="x-small",
                    )
                elif (
                    np.abs(nb_set_surface_pts[ipt][1] - nb_set_surface_pts[0][1]) > 0.1
                    and np.abs(min(nb_set_surface_pts[ipt][0], dist_limits[1]) - nb_set_surface_pts[0][0]) > 0.125
                ):
                    xytext = patch_center
                    subplot.annotate(
                        mytext,
                        xy=xytext,
                        ha="center",
                        va="center",
                        color=myinvcolor,
                        fontsize="x-small",
                    )
        subplot.set_title(title)
        subplot.set_xlabel(xlabel)
        subplot.set_ylabel(ylabel)
        dist_limits.sort()
        ang_limits.sort()
        subplot.set_xlim(dist_limits)
        subplot.set_ylim(ang_limits)
        if strategy is not None:
            # Strategy visualization is best-effort; failures are deliberately ignored.
            try:
                strategy.add_strategy_visualization_to_subplot(subplot=subplot)
            except Exception:
                pass
        if plot_type["angle_parameter"][0] == "initial_normalized_inverted":
            subplot.axes.invert_yaxis()
        scalarmap.set_array([mymin, mymax])
        cb = fig.colorbar(scalarmap, ax=subplot, extend="max")
        cb.set_label("Continuous symmetry measure")
        return fig, subplot
def plot_environments(
self,
isite,
plot_type=None,
title="Coordination numbers",
max_dist=2.0,
figsize=None,
strategy=None,
):
"""
Plotting of the coordination numbers of a given site for all the distfactor/angfactor parameters. If the
chemical environments are given, a color map is added to the plot, with the lowest continuous symmetry measure
as the value for the color of that distfactor/angfactor set.
Args:
isite: Index of the site for which the plot has to be done.
plot_type: How to plot the coordinations.
title: Title for the figure.
max_dist: Maximum distance to be plotted when the plotting of the distance is set to 'initial_normalized'
or 'initial_real' (Warning: this is not the same meaning in both cases! In the first case, the
closest atom lies at a "normalized" distance of 1.0 so that 2.0 means refers to this normalized
distance while in the second case, the real distance is used).
figsize: Size of the figure.
strategy: Whether to plot information about one of the Chemenv Strategies.
"""
fig, subplot = self.get_environments_figure(
isite=isite,
plot_type=plot_type,
title=title,
max_dist=max_dist,
figsize=figsize,
strategy=strategy,
)
if fig is None:
return
fig.show()
def save_environments_figure(
self,
isite,
imagename="image.png",
plot_type=None,
title="Coordination numbers",
max_dist=2.0,
figsize=None,
):
"""
Saves the environments figure to a given file.
Args:
isite: Index of the site for which the plot has to be done.
imagename: Name of the file to which the figure has to be saved.
plot_type: How to plot the coordinations.
title: Title for the figure.
max_dist: Maximum distance to be plotted when the plotting of the distance is set to 'initial_normalized'
or 'initial_real' (Warning: this is not the same meaning in both cases! In the first case, the
closest atom lies at a "normalized" distance of 1.0 so that 2.0 means refers to this normalized
distance while in the second case, the real distance is used).
figsize: Size of the figure.
"""
fig, subplot = self.get_environments_figure(
isite=isite,
plot_type=plot_type,
title=title,
max_dist=max_dist,
figsize=figsize,
)
if fig is None:
return
fig.savefig(imagename)
    def differences_wrt(self, other):
        """
        Return differences found in the current StructureEnvironments with respect to another StructureEnvironments.
        Args:
            other: A StructureEnvironments object.
        Returns:
            List of differences between the two StructureEnvironments objects.
        """
        differences = []
        # A structure mismatch makes all further comparisons meaningless: stop here.
        if self.structure != other.structure:
            differences.append(
                {
                    "difference": "structure",
                    "comparison": "__eq__",
                    "self": self.structure,
                    "other": other.structure,
                }
            )
            differences.append(
                {
                    "difference": "PREVIOUS DIFFERENCE IS DISMISSIVE",
                    "comparison": "differences_wrt",
                }
            )
            return differences
        if self.valences != other.valences:
            differences.append(
                {
                    "difference": "valences",
                    "comparison": "__eq__",
                    "self": self.valences,
                    "other": other.valences,
                }
            )
        if self.info != other.info:
            differences.append(
                {
                    "difference": "info",
                    "comparison": "__eq__",
                    "self": self.info,
                    "other": other.info,
                }
            )
        if self.voronoi != other.voronoi:
            # Distinguish a "close" voronoi mismatch from a structural one; both
            # are dismissive for the per-site comparison below.
            if self.voronoi.is_close_to(other.voronoi):
                differences.append(
                    {
                        "difference": "voronoi",
                        "comparison": "__eq__",
                        "self": self.voronoi,
                        "other": other.voronoi,
                    }
                )
                differences.append(
                    {
                        "difference": "PREVIOUS DIFFERENCE IS DISMISSIVE",
                        "comparison": "differences_wrt",
                    }
                )
                return differences
            differences.append(
                {
                    "difference": "voronoi",
                    "comparison": "is_close_to",
                    "self": self.voronoi,
                    "other": other.voronoi,
                }
            )
            # TODO: make it possible to have "close" voronoi's
            differences.append(
                {
                    "difference": "PREVIOUS DIFFERENCE IS DISMISSIVE",
                    "comparison": "differences_wrt",
                }
            )
            return differences
        # Per-site comparison of the neighbors sets and their environments.
        for isite, self_site_nb_sets in enumerate(self.neighbors_sets):
            other_site_nb_sets = other.neighbors_sets[isite]
            if self_site_nb_sets is None:
                if other_site_nb_sets is None:
                    continue
                differences.append(
                    {
                        "difference": f"neighbors_sets[isite={isite:d}]",
                        "comparison": "has_neighbors",
                        "self": "None",
                        "other": set(other_site_nb_sets.keys()),
                    }
                )
                continue
            if other_site_nb_sets is None:
                differences.append(
                    {
                        "difference": f"neighbors_sets[isite={isite:d}]",
                        "comparison": "has_neighbors",
                        "self": set(self_site_nb_sets.keys()),
                        "other": "None",
                    }
                )
                continue
            self_site_cns = set(self_site_nb_sets.keys())
            other_site_cns = set(other_site_nb_sets.keys())
            if self_site_cns != other_site_cns:
                differences.append(
                    {
                        "difference": f"neighbors_sets[isite={isite:d}]",
                        "comparison": "coordination_numbers",
                        "self": self_site_cns,
                        "other": other_site_cns,
                    }
                )
            common_cns = self_site_cns.intersection(other_site_cns)
            for cn in common_cns:
                other_site_cn_nb_sets = other_site_nb_sets[cn]
                self_site_cn_nb_sets = self_site_nb_sets[cn]
                set_self_site_cn_nb_sets = set(self_site_cn_nb_sets)
                set_other_site_cn_nb_sets = set(other_site_cn_nb_sets)
                if set_self_site_cn_nb_sets != set_other_site_cn_nb_sets:
                    differences.append(
                        {
                            "difference": f"neighbors_sets[isite={isite:d}][cn={cn:d}]",
                            "comparison": "neighbors_sets",
                            "self": self_site_cn_nb_sets,
                            "other": other_site_cn_nb_sets,
                        }
                    )
                common_nb_sets = set_self_site_cn_nb_sets.intersection(set_other_site_cn_nb_sets)
                for nb_set in common_nb_sets:
                    inb_set_self = self_site_cn_nb_sets.index(nb_set)
                    inb_set_other = other_site_cn_nb_sets.index(nb_set)
                    self_ce = self.ce_list[isite][cn][inb_set_self]
                    other_ce = other.ce_list[isite][cn][inb_set_other]
                    if self_ce != other_ce:
                        # NOTE(review): ce_list entries can be None (add_neighbors_set
                        # appends None placeholders); if self_ce is None while
                        # other_ce is not, is_close_to below would raise
                        # AttributeError — TODO confirm and guard upstream.
                        if self_ce.is_close_to(other_ce):
                            differences.append(
                                {
                                    "difference": "ce_list[isite={:d}][cn={:d}]"
                                    "[inb_set={:d}]".format(isite, cn, inb_set_self),
                                    "comparison": "__eq__",
                                    "self": self_ce,
                                    "other": other_ce,
                                }
                            )
                        else:
                            differences.append(
                                {
                                    "difference": "ce_list[isite={:d}][cn={:d}]"
                                    "[inb_set={:d}]".format(isite, cn, inb_set_self),
                                    "comparison": "is_close_to",
                                    "self": self_ce,
                                    "other": other_ce,
                                }
                            )
        return differences
def __eq__(self, other):
if len(self.ce_list) != len(other.ce_list):
return False
if self.voronoi != other.voronoi:
return False
if len(self.valences) != len(other.valences):
return False
if self.sites_map != other.sites_map:
return False
if self.equivalent_sites != other.equivalent_sites:
return False
if self.structure != other.structure:
return False
if self.info != other.info:
return False
for isite, site_ces in enumerate(self.ce_list):
site_nb_sets_self = self.neighbors_sets[isite]
site_nb_sets_other = other.neighbors_sets[isite]
if site_nb_sets_self != site_nb_sets_other:
return False
if site_ces != other.ce_list[isite]:
return False
return True
    def __ne__(self, other):
        """Inverse of __eq__ (needed because __eq__ is user-defined)."""
        return not self == other
    def as_dict(self):
        """
        Bson-serializable dict representation of the StructureEnvironments object.
        Returns:
            Bson-serializable dict representation of the StructureEnvironments object.
        """
        # Chemical environments: one entry per site, keyed by coordination number
        # (stringified for BSON compatibility); sites without environments are None.
        ce_list_dict = [
            {str(cn): [ce.as_dict() if ce is not None else None for ce in ce_dict[cn]] for cn in ce_dict}
            if ce_dict is not None
            else None
            for ce_dict in self.ce_list
        ]
        # Neighbors sets: same per-site layout, keyed by (stringified) coordination number.
        nbs_sets_dict = [
            {str(cn): [nb_set.as_dict() for nb_set in nb_sets] for cn, nb_sets in site_nbs_sets.items()}
            if site_nbs_sets is not None
            else None
            for site_nbs_sets in self.neighbors_sets
        ]
        # "sites_info" needs its integer keys stringified, so it is rebuilt
        # separately from the rest of the info dict.
        info_dict = {key: val for key, val in self.info.items() if key not in ["sites_info"]}
        info_dict["sites_info"] = [
            {
                "nb_sets_info": {
                    str(cn): {str(inb_set): nb_set_info for inb_set, nb_set_info in cn_sets.items()}
                    for cn, cn_sets in site_info["nb_sets_info"].items()
                },
                "time": site_info["time"],
            }
            if "nb_sets_info" in site_info
            else {}
            for site_info in self.info["sites_info"]
        ]
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "voronoi": self.voronoi.as_dict(),
            "valences": self.valences,
            "sites_map": self.sites_map,
            "equivalent_sites": [[ps.as_dict() for ps in psl] for psl in self.equivalent_sites],
            "ce_list": ce_list_dict,
            "structure": self.structure.as_dict(),
            "neighbors_sets": nbs_sets_dict,
            "info": info_dict,
        }
    @classmethod
    def from_dict(cls, d):
        """
        Reconstructs the StructureEnvironments object from a dict representation of the StructureEnvironments created
        using the as_dict method.
        Args:
            d: dict representation of the StructureEnvironments object.
        Returns:
            StructureEnvironments object.
        """
        # Rebuild the per-site chemical environments; coordination numbers were
        # stringified in as_dict, so cast them back to int. The "None" string is
        # accepted for backward compatibility with older serialized objects.
        ce_list = [
            None
            if (ce_dict == "None" or ce_dict is None)
            else {
                int(cn): [
                    None if (ced is None or ced == "None") else ChemicalEnvironments.from_dict(ced)
                    for ced in ce_dict[cn]
                ]
                for cn in ce_dict
            }
            for ce_dict in d["ce_list"]
        ]
        voronoi = DetailedVoronoiContainer.from_dict(d["voronoi"])
        structure = Structure.from_dict(d["structure"])
        # The neighbors sets need both the structure and the voronoi container,
        # hence the deserialization order above.
        neighbors_sets = [
            {
                int(cn): [
                    cls.NeighborsSet.from_dict(dd=nb_set_dict, structure=structure, detailed_voronoi=voronoi)
                    for nb_set_dict in nb_sets
                ]
                for cn, nb_sets in site_nbs_sets_dict.items()
            }
            if site_nbs_sets_dict is not None
            else None
            for site_nbs_sets_dict in d["neighbors_sets"]
        ]
        # "sites_info" keys were stringified in as_dict; restore them to int.
        info = {key: val for key, val in d["info"].items() if key not in ["sites_info"]}
        if "sites_info" in d["info"]:
            info["sites_info"] = [
                {
                    "nb_sets_info": {
                        int(cn): {int(inb_set): nb_set_info for inb_set, nb_set_info in cn_sets.items()}
                        for cn, cn_sets in site_info["nb_sets_info"].items()
                    },
                    "time": site_info["time"],
                }
                if "nb_sets_info" in site_info
                else {}
                for site_info in d["info"]["sites_info"]
            ]
        return cls(
            voronoi=voronoi,
            valences=d["valences"],
            sites_map=d["sites_map"],
            equivalent_sites=[[PeriodicSite.from_dict(psd) for psd in psl] for psl in d["equivalent_sites"]],
            ce_list=ce_list,
            structure=structure,
            neighbors_sets=neighbors_sets,
            info=info,
        )
class LightStructureEnvironments(MSONable):
    """
    Class used to store the chemical environments of a given structure obtained from a given ChemenvStrategy. Currently,
    only strategies leading to the determination of a unique environment for each site are allowed.
    This class does not store all the information contained in the StructureEnvironments object, only the coordination
    environment found.
    """

    # NOTE(review): presumably a tolerance on oxidation states; its usage is not
    # visible in this part of the file — confirm before relying on it.
    DELTA_MAX_OXIDATION_STATE = 0.1
    # Statistics fields returned by default by get_statistics().
    DEFAULT_STATISTICS_FIELDS = [
        "anion_list",
        "anion_atom_list",
        "cation_list",
        "cation_atom_list",
        "neutral_list",
        "neutral_atom_list",
        "atom_coordination_environments_present",
        "ion_coordination_environments_present",
        "fraction_atom_coordination_environments_present",
        "fraction_ion_coordination_environments_present",
        "coordination_environments_atom_present",
        "coordination_environments_ion_present",
    ]
    class NeighborsSet:
        """
        Class used to store a given set of neighbors of a given site (based on a list of sites, the voronoi
        container is not part of the LightStructureEnvironments object).
        """

        def __init__(self, structure, isite, all_nbs_sites, all_nbs_sites_indices):
            """
            Constructor for NeighborsSet.
            Args:
                structure: Structure object.
                isite: Index of the site for which neighbors are stored in this NeighborsSet.
                all_nbs_sites: All the possible neighbors for this site.
                all_nbs_sites_indices: Indices of the sites in all_nbs_sites that make up this NeighborsSet.
            Raises:
                ValueError: If all_nbs_sites_indices contains duplicates.
            """
            self.structure = structure
            self.isite = isite
            self.all_nbs_sites = all_nbs_sites
            myset = set(all_nbs_sites_indices)
            if len(myset) != len(all_nbs_sites_indices):
                raise ValueError("Set of neighbors contains duplicates !")
            # Sorted copy used for order-independent equality; the unsorted
            # original keeps the neighbor order for the accessors below.
            self.all_nbs_sites_indices = sorted(myset)
            self.all_nbs_sites_indices_unsorted = all_nbs_sites_indices
            # NOTE(review): initialized empty and never populated in this class
            # as visible here — confirm whether external code fills it.
            self.all_nbs_sites_indices_and_image = []

        @property
        def neighb_coords(self):
            """
            Coordinates of neighbors for this NeighborsSet.
            """
            return [self.all_nbs_sites[inb]["site"].coords for inb in self.all_nbs_sites_indices_unsorted]

        @property
        def neighb_sites(self):
            """
            Neighbors for this NeighborsSet as pymatgen Sites.
            """
            return [self.all_nbs_sites[inb]["site"] for inb in self.all_nbs_sites_indices_unsorted]

        @property
        def neighb_sites_and_indices(self):
            """
            List of neighbors for this NeighborsSet as pymatgen Sites and their index in the original structure.
            """
            return [
                {
                    "site": self.all_nbs_sites[inb]["site"],
                    "index": self.all_nbs_sites[inb]["index"],
                }
                for inb in self.all_nbs_sites_indices_unsorted
            ]

        @property
        def neighb_indices_and_images(self):
            """
            List of indices and images with respect to the original unit cell sites for this NeighborsSet.
            """
            return [
                {
                    "index": self.all_nbs_sites[inb]["index"],
                    "image_cell": self.all_nbs_sites[inb]["image_cell"],
                }
                for inb in self.all_nbs_sites_indices_unsorted
            ]

        def __len__(self):
            """Coordination number, i.e. the number of distinct neighbors."""
            return len(self.all_nbs_sites_indices)

        def __hash__(self):
            # Cheap hash (the coordination number): consistent with __eq__ since
            # equal NeighborsSets necessarily have the same number of neighbors.
            return len(self.all_nbs_sites_indices)

        def __eq__(self, other):
            """Equal when concerning the same site with the same (sorted) neighbor indices."""
            return self.isite == other.isite and self.all_nbs_sites_indices == other.all_nbs_sites_indices

        def __ne__(self, other):
            """Inverse of __eq__ (needed because __eq__ is user-defined)."""
            return not self == other

        def __str__(self):
            """Human-readable summary (site index, coordination number, neighbor indices)."""
            out = f"Neighbors Set for site #{self.isite:d} :\n"
            out += f" - Coordination number : {len(self):d}\n"
            out += " - Neighbors sites indices : {}\n".format(
                ", ".join([f"{nb_list_index:d}" for nb_list_index in self.all_nbs_sites_indices])
            )
            return out

        def as_dict(self):
            """
            A JSON serializable dict representation of the NeighborsSet.
            The *unsorted* indices are serialized so neighbor order survives a round-trip.
            """
            return {
                "isite": self.isite,
                "all_nbs_sites_indices": self.all_nbs_sites_indices_unsorted,
            }

        @classmethod
        def from_dict(cls, dd, structure, all_nbs_sites):
            """
            Reconstructs the NeighborsSet algorithm from its JSON serializable dict representation, together with
            the structure and all the possible neighbors sites.
            As an inner (nested) class, the NeighborsSet is not supposed to be used anywhere else that inside the
            LightStructureEnvironments. The from_dict method is thus using the structure and all_nbs_sites when
            reconstructing itself. These two are both in the LightStructureEnvironments object.
            Args:
                dd: a JSON serializable dict representation of a NeighborsSet.
                structure: The structure.
                all_nbs_sites: The list of all the possible neighbors for a given site.
            Returns: a NeighborsSet.
            """
            return cls(
                structure=structure,
                isite=dd["isite"],
                all_nbs_sites=all_nbs_sites,
                all_nbs_sites_indices=dd["all_nbs_sites_indices"],
            )
    def __init__(
        self,
        strategy,
        coordination_environments=None,
        all_nbs_sites=None,
        neighbors_sets=None,
        structure=None,
        valences=None,
        valences_origin=None,
    ):
        """
        Constructor for the LightStructureEnvironments object.
        Args:
            strategy: ChemEnv strategy used to get the environments.
            coordination_environments: The coordination environments identified.
            all_nbs_sites: All the possible neighbors for each site in the structure.
            neighbors_sets: The neighbors sets of each site in the structure.
            structure: The structure.
            valences: The valences used to get the environments (if needed).
            valences_origin: How the valences were obtained (e.g. from the Bond-valence analysis or from the original
                structure).
        """
        self.strategy = strategy
        # Computed lazily by setup_statistic_lists() (triggered in get_statistics()).
        self.statistics_dict = None
        self.coordination_environments = coordination_environments
        self._all_nbs_sites = all_nbs_sites
        self.neighbors_sets = neighbors_sets
        self.structure = structure
        self.valences = valences
        self.valences_origin = valences_origin
@classmethod
def from_structure_environments(cls, strategy, structure_environments, valences=None, valences_origin=None):
"""
Construct a LightStructureEnvironments object from a strategy and a StructureEnvironments object.
Args:
strategy: ChemEnv strategy used.
structure_environments: StructureEnvironments object from which to construct the LightStructureEnvironments.
valences: The valences of each site in the structure.
valences_origin: How the valences were obtained (e.g. from the Bond-valence analysis or from the original
structure).
Returns: a LightStructureEnvironments object.
"""
structure = structure_environments.structure
strategy.set_structure_environments(structure_environments=structure_environments)
coordination_environments = [None] * len(structure)
neighbors_sets = [None] * len(structure)
_all_nbs_sites = []
my_all_nbs_sites = []
if valences is None:
valences = structure_environments.valences
if valences_origin is None:
valences_origin = "from_structure_environments"
else:
if valences_origin is None:
valences_origin = "user-specified"
for isite, site in enumerate(structure):
site_ces_and_nbs_list = strategy.get_site_ce_fractions_and_neighbors(site, strategy_info=True)
if site_ces_and_nbs_list is None:
continue
coordination_environments[isite] = []
neighbors_sets[isite] = []
site_ces = []
site_nbs_sets = []
for ce_and_neighbors in site_ces_and_nbs_list:
_all_nbs_sites_indices = []
# Coordination environment
ce_dict = {
"ce_symbol": ce_and_neighbors["ce_symbol"],
"ce_fraction": ce_and_neighbors["ce_fraction"],
}
if ce_and_neighbors["ce_dict"] is not None:
csm = ce_and_neighbors["ce_dict"]["other_symmetry_measures"][strategy.symmetry_measure_type]
else:
csm = None
ce_dict["csm"] = csm
ce_dict["permutation"] = ce_and_neighbors["ce_dict"]["permutation"]
site_ces.append(ce_dict)
# Neighbors
neighbors = ce_and_neighbors["neighbors"]
for nb_site_and_index in neighbors:
nb_site = nb_site_and_index["site"]
try:
nb_allnbs_sites_index = my_all_nbs_sites.index(nb_site)
except ValueError:
nb_index_unitcell = nb_site_and_index["index"]
diff = nb_site.frac_coords - structure[nb_index_unitcell].frac_coords
rounddiff = np.round(diff)
if not np.allclose(diff, rounddiff):
raise ValueError(
"Weird, differences between one site in a periodic image cell is not integer ..."
)
nb_image_cell = np.array(rounddiff, int)
nb_allnbs_sites_index = len(_all_nbs_sites)
_all_nbs_sites.append(
{
"site": nb_site,
"index": nb_index_unitcell,
"image_cell": nb_image_cell,
}
)
my_all_nbs_sites.append(nb_site)
_all_nbs_sites_indices.append(nb_allnbs_sites_index)
nb_set = cls.NeighborsSet(
structure=structure,
isite=isite,
all_nbs_sites=_all_nbs_sites,
all_nbs_sites_indices=_all_nbs_sites_indices,
)
site_nbs_sets.append(nb_set)
coordination_environments[isite] = site_ces
neighbors_sets[isite] = site_nbs_sets
return cls(
strategy=strategy,
coordination_environments=coordination_environments,
all_nbs_sites=_all_nbs_sites,
neighbors_sets=neighbors_sets,
structure=structure,
valences=valences,
valences_origin=valences_origin,
)
    def setup_statistic_lists(self):
        """
        Set up the statistics of environments for this LightStructureEnvironments.

        Populates self.statistics_dict with the anion/cation/neutral species and
        atom lists, and the (fractional) presence of each coordination
        environment per atom and per ion.
        """
        self.statistics_dict = {
            "valences_origin": self.valences_origin,
            "anion_list": {},  # OK
            "anion_number": None,  # OK
            "anion_atom_list": {},  # OK
            "anion_atom_number": None,  # OK
            "cation_list": {},  # OK
            "cation_number": None,  # OK
            "cation_atom_list": {},  # OK
            "cation_atom_number": None,  # OK
            "neutral_list": {},  # OK
            "neutral_number": None,  # OK
            "neutral_atom_list": {},  # OK
            "neutral_atom_number": None,  # OK
            "atom_coordination_environments_present": {},  # OK
            "ion_coordination_environments_present": {},  # OK
            "coordination_environments_ion_present": {},  # OK
            "coordination_environments_atom_present": {},  # OK
            "fraction_ion_coordination_environments_present": {},  # OK
            "fraction_atom_coordination_environments_present": {},  # OK
            "fraction_coordination_environments_ion_present": {},  # OK
            "fraction_coordination_environments_atom_present": {},  # OK
            "count_ion_present": {},  # OK
            "count_atom_present": {},  # OK
            "count_coordination_environments_present": {},
        }
        # Short aliases into statistics_dict used while accumulating below.
        atom_stat = self.statistics_dict["atom_coordination_environments_present"]
        ce_atom_stat = self.statistics_dict["coordination_environments_atom_present"]
        fraction_atom_stat = self.statistics_dict["fraction_atom_coordination_environments_present"]
        fraction_ce_atom_stat = self.statistics_dict["fraction_coordination_environments_atom_present"]
        count_atoms = self.statistics_dict["count_atom_present"]
        count_ce = self.statistics_dict["count_coordination_environments_present"]
        for isite, site in enumerate(self.structure):
            # Building anion and cation list
            site_species = []
            if self.valences != "undefined":
                for sp, occ in site.species.items():
                    # The site valence applies to every species on the site.
                    valence = self.valences[isite]
                    strspecie = str(Species(sp.symbol, valence))
                    # Dispatch into the anion/cation/neutral bucket by valence sign.
                    if valence < 0:
                        specielist = self.statistics_dict["anion_list"]
                        atomlist = self.statistics_dict["anion_atom_list"]
                    elif valence > 0:
                        specielist = self.statistics_dict["cation_list"]
                        atomlist = self.statistics_dict["cation_atom_list"]
                    else:
                        specielist = self.statistics_dict["neutral_list"]
                        atomlist = self.statistics_dict["neutral_atom_list"]
                    # Accumulate occupancies per species string and per element.
                    if strspecie not in specielist:
                        specielist[strspecie] = occ
                    else:
                        specielist[strspecie] += occ
                    if sp.symbol not in atomlist:
                        atomlist[sp.symbol] = occ
                    else:
                        atomlist[sp.symbol] += occ
                    site_species.append((sp.symbol, valence, occ))
            # Building environments lists
            if self.coordination_environments[isite] is not None:
                site_envs = [
                    (ce_piece_dict["ce_symbol"], ce_piece_dict["ce_fraction"])
                    for ce_piece_dict in self.coordination_environments[isite]
                ]
                # Overall count of each environment (weighted by its fraction).
                for ce_symbol, fraction in site_envs:
                    if fraction is None:
                        continue
                    if ce_symbol not in count_ce:
                        count_ce[ce_symbol] = 0.0
                    count_ce[ce_symbol] += fraction
                # Per-atom (element) environment statistics, weighted by occupancy.
                for sp, occ in site.species.items():
                    elmt = sp.symbol
                    if elmt not in atom_stat:
                        atom_stat[elmt] = {}
                        count_atoms[elmt] = 0.0
                    count_atoms[elmt] += occ
                    for ce_symbol, fraction in site_envs:
                        if fraction is None:
                            continue
                        if ce_symbol not in atom_stat[elmt]:
                            atom_stat[elmt][ce_symbol] = 0.0
                        atom_stat[elmt][ce_symbol] += occ * fraction
                        if ce_symbol not in ce_atom_stat:
                            ce_atom_stat[ce_symbol] = {}
                        if elmt not in ce_atom_stat[ce_symbol]:
                            ce_atom_stat[ce_symbol][elmt] = 0.0
                        ce_atom_stat[ce_symbol][elmt] += occ * fraction
                # Per-ion (element + oxidation state) environment statistics,
                # only meaningful when valences are defined.
                if self.valences != "undefined":
                    ion_stat = self.statistics_dict["ion_coordination_environments_present"]
                    ce_ion_stat = self.statistics_dict["coordination_environments_ion_present"]
                    count_ions = self.statistics_dict["count_ion_present"]
                    for elmt, oxi_state, occ in site_species:
                        if elmt not in ion_stat:
                            ion_stat[elmt] = {}
                            count_ions[elmt] = {}
                        if oxi_state not in ion_stat[elmt]:
                            ion_stat[elmt][oxi_state] = {}
                            count_ions[elmt][oxi_state] = 0.0
                        count_ions[elmt][oxi_state] += occ
                        for ce_symbol, fraction in site_envs:
                            if fraction is None:
                                continue
                            if ce_symbol not in ion_stat[elmt][oxi_state]:
                                ion_stat[elmt][oxi_state][ce_symbol] = 0.0
                            ion_stat[elmt][oxi_state][ce_symbol] += occ * fraction
                            if ce_symbol not in ce_ion_stat:
                                ce_ion_stat[ce_symbol] = {}
                            if elmt not in ce_ion_stat[ce_symbol]:
                                ce_ion_stat[ce_symbol][elmt] = {}
                            if oxi_state not in ce_ion_stat[ce_symbol][elmt]:
                                ce_ion_stat[ce_symbol][elmt][oxi_state] = 0.0
                            ce_ion_stat[ce_symbol][elmt][oxi_state] += occ * fraction
        # Totals for each ion/atom family.
        self.statistics_dict["anion_number"] = len(self.statistics_dict["anion_list"])
        self.statistics_dict["anion_atom_number"] = len(self.statistics_dict["anion_atom_list"])
        self.statistics_dict["cation_number"] = len(self.statistics_dict["cation_list"])
        self.statistics_dict["cation_atom_number"] = len(self.statistics_dict["cation_atom_list"])
        self.statistics_dict["neutral_number"] = len(self.statistics_dict["neutral_list"])
        self.statistics_dict["neutral_atom_number"] = len(self.statistics_dict["neutral_atom_list"])
        # Normalize the per-atom accumulations into fractions.
        for elmt, envs in atom_stat.items():
            sumelement = count_atoms[elmt]
            fraction_atom_stat[elmt] = {env: fraction / sumelement for env, fraction in envs.items()}
        for ce_symbol, atoms in ce_atom_stat.items():
            sumsymbol = count_ce[ce_symbol]
            fraction_ce_atom_stat[ce_symbol] = {atom: fraction / sumsymbol for atom, fraction in atoms.items()}
        # Normalize the per-ion accumulations into fractions.
        ion_stat = self.statistics_dict["ion_coordination_environments_present"]
        fraction_ion_stat = self.statistics_dict["fraction_ion_coordination_environments_present"]
        ce_ion_stat = self.statistics_dict["coordination_environments_ion_present"]
        fraction_ce_ion_stat = self.statistics_dict["fraction_coordination_environments_ion_present"]
        count_ions = self.statistics_dict["count_ion_present"]
        for elmt, oxi_states_envs in ion_stat.items():
            fraction_ion_stat[elmt] = {}
            for oxi_state, envs in oxi_states_envs.items():
                sumspecie = count_ions[elmt][oxi_state]
                fraction_ion_stat[elmt][oxi_state] = {env: fraction / sumspecie for env, fraction in envs.items()}
        for ce_symbol, ions in ce_ion_stat.items():
            fraction_ce_ion_stat[ce_symbol] = {}
            # Normalization over all ions found in this environment.
            sum_ce = np.sum([np.sum(list(oxistates.values())) for elmt, oxistates in ions.items()])
            for elmt, oxistates in ions.items():
                fraction_ce_ion_stat[ce_symbol][elmt] = {
                    oxistate: fraction / sum_ce for oxistate, fraction in oxistates.items()
                }
def get_site_info_for_specie_ce(self, specie, ce_symbol):
"""
Get list of indices that have the given specie with a given Coordination environment.
Args:
specie: Species to get.
ce_symbol: Symbol of the coordination environment to get.
Returns: Dictionary with the list of indices in the structure that have the given specie in the given
environment, their fraction and continuous symmetry measures.
"""
element = specie.symbol
oxi_state = specie.oxi_state
isites = []
csms = []
fractions = []
for isite, site in enumerate(self.structure):
if element in [sp.symbol for sp in site.species]:
if self.valences == "undefined" or oxi_state == self.valences[isite]:
for ce_dict in self.coordination_environments[isite]:
if ce_symbol == ce_dict["ce_symbol"]:
isites.append(isite)
csms.append(ce_dict["csm"])
fractions.append(ce_dict["ce_fraction"])
return {"isites": isites, "fractions": fractions, "csms": csms}
def get_site_info_for_specie_allces(self, specie, min_fraction=0.0):
"""
Get list of indices that have the given specie.
Args:
specie: Species to get.
Returns: Dictionary with the list of coordination environments for the given species, the indices of the sites
in which they appear, their fractions and continuous symmetry measures.
"""
allces = {}
element = specie.symbol
oxi_state = specie.oxi_state
for isite, site in enumerate(self.structure):
if element in [sp.symbol for sp in site.species]:
if self.valences == "undefined" or oxi_state == self.valences[isite]:
if self.coordination_environments[isite] is None:
continue
for ce_dict in self.coordination_environments[isite]:
if ce_dict["ce_fraction"] < min_fraction:
continue
if ce_dict["ce_symbol"] not in allces:
allces[ce_dict["ce_symbol"]] = {
"isites": [],
"fractions": [],
"csms": [],
}
allces[ce_dict["ce_symbol"]]["isites"].append(isite)
allces[ce_dict["ce_symbol"]]["fractions"].append(ce_dict["ce_fraction"])
allces[ce_dict["ce_symbol"]]["csms"].append(ce_dict["csm"])
return allces
def get_statistics(self, statistics_fields=DEFAULT_STATISTICS_FIELDS, bson_compatible=False):
"""
Get the statistics of environments for this structure.
Args:
statistics_fields: Which statistics to get.
bson_compatible: Whether to make the dictionary BSON-compatible.
Returns:
A dictionary with the requested statistics.
"""
if self.statistics_dict is None:
self.setup_statistic_lists()
if statistics_fields == "ALL":
statistics_fields = list(self.statistics_dict.keys())
if bson_compatible:
dd = jsanitize({field: self.statistics_dict[field] for field in statistics_fields})
else:
dd = {field: self.statistics_dict[field] for field in statistics_fields}
return dd
def contains_only_one_anion_atom(self, anion_atom):
"""
Whether this LightStructureEnvironments concerns a structure with only one given anion atom type.
Args:
anion_atom: Anion (e.g. O, ...). The structure could contain O2- and O- though.
Returns: True if this LightStructureEnvironments concerns a structure with only one given anion_atom.
"""
return (
len(self.statistics_dict["anion_atom_list"]) == 1 and anion_atom in self.statistics_dict["anion_atom_list"]
)
def contains_only_one_anion(self, anion):
"""
Whether this LightStructureEnvironments concerns a structure with only one given anion type.
Args:
anion: Anion (e.g. O2-, ...).
Returns: True if this LightStructureEnvironments concerns a structure with only one given anion.
"""
return len(self.statistics_dict["anion_list"]) == 1 and anion in self.statistics_dict["anion_list"]
def site_contains_environment(self, isite, ce_symbol):
"""
Whether a given site contains a given coordination environment.
Args:
isite: Index of the site.
ce_symbol: Symbol of the coordination environment.
Returns: True if the site contains the given coordination environment.
"""
if self.coordination_environments[isite] is None:
return False
return ce_symbol in [ce_dict["ce_symbol"] for ce_dict in self.coordination_environments[isite]]
    def site_has_clear_environment(self, isite, conditions=None):
        """
        Whether a given site has a "clear" environments.
        A "clear" environment is somewhat arbitrary. You can pass (multiple) conditions, e.g. the environment should
        have a continuous symmetry measure lower than this, a fraction higher than that, ...
        Args:
            isite: Index of the site.
            conditions: Conditions to be checked for an environment to be "clear".
        Returns: True if the site has a clear environment.
        Raises:
            ValueError: If environments have not been computed for this site, or if a condition target is unknown.
        """
        if self.coordination_environments[isite] is None:
            raise ValueError(f"Coordination environments have not been determined for site {isite:d}")
        if conditions is None:
            # Without conditions, "clear" simply means a single environment was found.
            return len(self.coordination_environments[isite]) == 1
        # Conditions are checked against the dominant (largest fraction) environment.
        ce = max(self.coordination_environments[isite], key=lambda x: x["ce_fraction"])
        for condition in conditions:
            target = condition["target"]
            if target == "ce_fraction":
                if ce[target] < condition["minvalue"]:
                    return False
            elif target == "csm":
                if ce[target] > condition["maxvalue"]:
                    return False
            elif target == "number_of_ces":
                # NOTE(review): ce is a single environment dict and is unlikely
                # to contain a "number_of_ces" key, so this branch probably
                # raises KeyError — confirm intent (possibly should compare
                # len(self.coordination_environments[isite]) instead).
                if ce[target] > condition["maxnumber"]:
                    return False
            else:
                raise ValueError(f'Target "{target}" for condition of clear environment is not allowed')
        return True
def structure_has_clear_environments(self, conditions=None, skip_none=True, skip_empty=False):
"""
Whether all sites in a structure have "clear" environments.
Args:
conditions: Conditions to be checked for an environment to be "clear".
skip_none: Whether to skip sites for which no environments have been computed.
skip_empty: Whether to skip sites for which no environments could be found.
Returns: True if all the sites in the structure have clear environments.
"""
for isite in range(len(self.structure)):
if self.coordination_environments[isite] is None:
if skip_none:
continue
return False
if len(self.coordination_environments[isite]) == 0:
if skip_empty:
continue
return False
if not self.site_has_clear_environment(isite=isite, conditions=conditions):
return False
return True
def clear_environments(self, conditions=None):
"""
Get the clear environments in the structure.
Args:
conditions: Conditions to be checked for an environment to be "clear".
Returns: Set of clear environments in this structure.
"""
clear_envs_list = set()
for isite in range(len(self.structure)):
if self.coordination_environments[isite] is None:
continue
if len(self.coordination_environments[isite]) == 0:
continue
if self.site_has_clear_environment(isite=isite, conditions=conditions):
ce = max(
self.coordination_environments[isite],
key=lambda x: x["ce_fraction"],
)
clear_envs_list.add(ce["ce_symbol"])
return list(clear_envs_list)
def structure_contains_atom_environment(self, atom_symbol, ce_symbol):
"""
Checks whether the structure contains a given atom in a given environment.
Args:
atom_symbol: Symbol of the atom.
ce_symbol: Symbol of the coordination environment.
Returns:
True if the coordination environment is found, False otherwise
"""
for isite, site in enumerate(self.structure):
if Element(atom_symbol) in site.species.element_composition and self.site_contains_environment(
isite, ce_symbol
):
return True
return False
def environments_identified(self):
"""
Return the set of environments identified in this structure.
Returns: Set of environments identified in this structure.
"""
return {ce["ce_symbol"] for celist in self.coordination_environments if celist is not None for ce in celist}
    @property
    def uniquely_determines_coordination_environments(self):
        """
        True if the coordination environments are uniquely determined.
        Delegates to the strategy used to build this object.
        """
        return self.strategy.uniquely_determines_coordination_environments
def __eq__(self, other):
"""
Equality method that checks if the LightStructureEnvironments object is equal to another
LightStructureEnvironments object. Two LightStructureEnvironments objects are equal if the strategy used
is the same, if the structure is the same, if the valences used in the strategies are the same, if the
coordination environments and the neighbours determined by the strategy are the same.
Args:
other: LightStructureEnvironments object to compare with.
Returns:
True if both objects are equal, False otherwise.
"""
is_equal = (
self.strategy == other.strategy
and self.structure == other.structure
and self.coordination_environments == other.coordination_environments
and self.valences == other.valences
and self.neighbors_sets == other.neighbors_sets
)
this_sites = [ss["site"] for ss in self._all_nbs_sites]
other_sites = [ss["site"] for ss in other._all_nbs_sites]
this_indices = [ss["index"] for ss in self._all_nbs_sites]
other_indices = [ss["index"] for ss in other._all_nbs_sites]
return is_equal and this_sites == other_sites and this_indices == other_indices
    def __ne__(self, other):
        """Inverse of __eq__ (needed because __eq__ is user-defined)."""
        return not self == other
    def as_dict(self):
        """
        Bson-serializable dict representation of the LightStructureEnvironments object.
        Returns:
            Bson-serializable dict representation of the LightStructureEnvironments object.
        """
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "strategy": self.strategy.as_dict(),
            "structure": self.structure.as_dict(),
            "coordination_environments": self.coordination_environments,
            # The numpy "image_cell" arrays are converted to plain lists of ints
            # for BSON compatibility.
            "all_nbs_sites": [
                {
                    "site": nb_site["site"].as_dict(),
                    "index": nb_site["index"],
                    "image_cell": [int(ii) for ii in nb_site["image_cell"]],
                }
                for nb_site in self._all_nbs_sites
            ],
            # Sites without neighbors sets are serialized as None.
            "neighbors_sets": [
                [nb_set.as_dict() for nb_set in site_nb_sets] if site_nb_sets is not None else None
                for site_nb_sets in self.neighbors_sets
            ],
            "valences": self.valences,
        }
    @classmethod
    def from_dict(cls, d):
        """
        Reconstructs the LightStructureEnvironments object from a dict representation of the
        LightStructureEnvironments created using the as_dict method.
        Args:
            d: dict representation of the LightStructureEnvironments object.
        Returns:
            LightStructureEnvironments object.
        """
        dec = MontyDecoder()
        structure = dec.process_decoded(d["structure"])
        all_nbs_sites = []
        for nb_site in d["all_nbs_sites"]:
            site = dec.process_decoded(nb_site["site"])
            if "image_cell" in nb_site:
                image_cell = np.array(nb_site["image_cell"], int)
            else:
                # Backward compatibility: older serialized objects have no
                # "image_cell"; recover it from the (necessarily integer)
                # fractional-coordinate offset w.r.t. the unit-cell site.
                diff = site.frac_coords - structure[nb_site["index"]].frac_coords
                rounddiff = np.round(diff)
                if not np.allclose(diff, rounddiff):
                    raise ValueError("Weird, differences between one site in a periodic image cell is not integer ...")
                image_cell = np.array(rounddiff, int)
            all_nbs_sites.append({"site": site, "index": nb_site["index"], "image_cell": image_cell})
        # Neighbors sets are rebuilt against the freshly decoded structure and
        # neighbor list.
        neighbors_sets = [
            [
                cls.NeighborsSet.from_dict(dd=nb_set, structure=structure, all_nbs_sites=all_nbs_sites)
                for nb_set in site_nb_sets
            ]
            if site_nb_sets is not None
            else None
            for site_nb_sets in d["neighbors_sets"]
        ]
        return cls(
            strategy=dec.process_decoded(d["strategy"]),
            coordination_environments=d["coordination_environments"],
            all_nbs_sites=all_nbs_sites,
            neighbors_sets=neighbors_sets,
            structure=structure,
            valences=d["valences"],
        )
class ChemicalEnvironments(MSONable):
    """
    Class used to store all the information about the chemical environment of a given site for a given list of
    coordinated neighbours (internally called "cn_map").
    """

    def __init__(self, coord_geoms=None):
        """
        Initializes the ChemicalEnvironments object containing all the information about the chemical
        environment of a given site.
        Args:
            coord_geoms: coordination geometries to be added to the chemical environment.
        Raises:
            NotImplementedError: If coord_geoms is not None (not yet supported).
        """
        if coord_geoms is None:
            # Mapping of mp_symbol -> coordination geometry info dict.
            self.coord_geoms = {}
        else:
            raise NotImplementedError(
                "Constructor for ChemicalEnvironments with the coord_geoms argument is not yet implemented"
            )

    def __getitem__(self, mp_symbol):
        """Return the coordination geometry info dict for the given mp_symbol."""
        return self.coord_geoms[mp_symbol]

    def __len__(self):
        """
        Returns the number of coordination geometries in this ChemicalEnvironments object.
        Returns:
            Number of coordination geometries in this ChemicalEnvironments object.
        """
        return len(self.coord_geoms)

    def __iter__(self):
        """Iterate over (mp_symbol, coordination geometry dict) pairs."""
        yield from self.coord_geoms.items()
def minimum_geometry(self, symmetry_measure_type=None, max_csm=None):
"""
Returns the geometry with the minimum continuous symmetry measure of this ChemicalEnvironments.
Returns:
tuple (symbol, csm) with symbol being the geometry with the minimum continuous symmetry measure and
csm being the continuous symmetry measure associated to it.
Raises:
ValueError if no coordination geometry is found in this ChemicalEnvironments object.
"""
if len(self.coord_geoms) == 0:
return None
cglist = list(self.coord_geoms)
if symmetry_measure_type is None:
csms = np.array([self.coord_geoms[cg]["other_symmetry_measures"]["csm_wcs_ctwcc"] for cg in cglist])
else:
csms = np.array([self.coord_geoms[cg]["other_symmetry_measures"][symmetry_measure_type] for cg in cglist])
csmlist = [self.coord_geoms[cg] for cg in cglist]
imin = np.argmin(csms)
if max_csm is not None:
if csmlist[imin] > max_csm:
return None
return cglist[imin], csmlist[imin]
def minimum_geometries(self, n=None, symmetry_measure_type=None, max_csm=None):
"""
Returns a list of geometries with increasing continuous symmetry measure in this ChemicalEnvironments object.
Args:
n: Number of geometries to be included in the list.
Returns:
List of geometries with increasing continuous symmetry measure in this ChemicalEnvironments object.
Raises:
ValueError if no coordination geometry is found in this ChemicalEnvironments object.
"""
cglist = list(self.coord_geoms)
if symmetry_measure_type is None:
csms = np.array([self.coord_geoms[cg]["other_symmetry_measures"]["csm_wcs_ctwcc"] for cg in cglist])
else:
csms = np.array([self.coord_geoms[cg]["other_symmetry_measures"][symmetry_measure_type] for cg in cglist])
csmlist = [self.coord_geoms[cg] for cg in cglist]
isorted = np.argsort(csms)
if max_csm is not None:
if n is None:
return [(cglist[ii], csmlist[ii]) for ii in isorted if csms[ii] <= max_csm]
return [(cglist[ii], csmlist[ii]) for ii in isorted[:n] if csms[ii] <= max_csm]
if n is None:
return [(cglist[ii], csmlist[ii]) for ii in isorted]
return [(cglist[ii], csmlist[ii]) for ii in isorted[:n]]
def add_coord_geom(
self,
mp_symbol,
symmetry_measure,
algo="UNKNOWN",
permutation=None,
override=False,
local2perfect_map=None,
perfect2local_map=None,
detailed_voronoi_index=None,
other_symmetry_measures=None,
rotation_matrix=None,
scaling_factor=None,
):
"""
Adds a coordination geometry to the ChemicalEnvironments object.
Args:
mp_symbol: Symbol of the coordination geometry added.
symmetry_measure: Symmetry measure of the coordination geometry added.
algo: Algorithm used for the search of the coordination geometry added.
permutation: Permutation of the neighbors that leads to the csm stored.
override: If set to True, the coordination geometry will override the existent one if present.
local2perfect_map: Mapping of the local indices to the perfect indices.
perfect2local_map: Mapping of the perfect indices to the local indices.
detailed_voronoi_index: Index in the voronoi containing the neighbors set.
other_symmetry_measures: Other symmetry measure of the coordination geometry added (with/without the
central atom, centered on the central atom or on the centroid with/without the central atom).
rotation_matrix: Rotation matrix mapping the local geometry to the perfect geometry.
scaling_factor: Scaling factor mapping the local geometry to the perfect geometry.
Raises:
ChemenvError if the coordination geometry is already added and override is set to False
"""
if not allcg.is_a_valid_coordination_geometry(mp_symbol=mp_symbol):
raise ChemenvError(
self.__class__,
"add_coord_geom",
f'Coordination geometry with mp_symbol "{mp_symbol}" is not valid',
)
if mp_symbol in list(self.coord_geoms.keys()) and not override:
raise ChemenvError(
self.__class__,
"add_coord_geom",
"This coordination geometry is already present and override is set to False",
)
self.coord_geoms[mp_symbol] = {
"symmetry_measure": float(symmetry_measure),
"algo": algo,
"permutation": [int(i) for i in permutation],
"local2perfect_map": local2perfect_map,
"perfect2local_map": perfect2local_map,
"detailed_voronoi_index": detailed_voronoi_index,
"other_symmetry_measures": other_symmetry_measures,
"rotation_matrix": rotation_matrix,
"scaling_factor": scaling_factor,
}
def __str__(self):
"""
Returns a string representation of the ChemicalEnvironments object.
Returns:
String representation of the ChemicalEnvironments object.
"""
out = "Chemical environments object :\n"
if len(self.coord_geoms) == 0:
out += " => No coordination in it <=\n"
return out
for key in self.coord_geoms.keys():
mp_symbol = key
break
cn = symbol_cn_mapping[mp_symbol]
out += f" => Coordination {cn} <=\n"
mp_symbols = list(self.coord_geoms.keys())
csms_wcs = [self.coord_geoms[mp_symbol]["other_symmetry_measures"]["csm_wcs_ctwcc"] for mp_symbol in mp_symbols]
icsms_sorted = np.argsort(csms_wcs)
mp_symbols = [mp_symbols[ii] for ii in icsms_sorted]
for mp_symbol in mp_symbols:
csm_wcs = self.coord_geoms[mp_symbol]["other_symmetry_measures"]["csm_wcs_ctwcc"]
csm_wocs = self.coord_geoms[mp_symbol]["other_symmetry_measures"]["csm_wocs_ctwocc"]
out += f" - {mp_symbol}\n"
out += f" csm1 (with central site) : {csm_wcs}"
out += f" csm2 (without central site) : {csm_wocs}"
out += f" algo : {self.coord_geoms[mp_symbol]['algo']}"
out += f" perm : {self.coord_geoms[mp_symbol]['permutation']}\n"
out += f" local2perfect : {str(self.coord_geoms[mp_symbol]['local2perfect_map'])}\n"
out += f" perfect2local : {str(self.coord_geoms[mp_symbol]['perfect2local_map'])}\n"
return out
def is_close_to(self, other, rtol=0.0, atol=1e-8):
"""
Whether this ChemicalEnvironments object is close to another one.
Args:
other: Another ChemicalEnvironments object.
rtol: Relative tolerance for the comparison of Continuous Symmetry Measures.
atol: Absolute tolerance for the comparison of Continuous Symmetry Measures.
Returns:
True if the two ChemicalEnvironments objects are close to each other.
"""
if set(self.coord_geoms.keys()) != set(other.coord_geoms.keys()):
return False
for mp_symbol, cg_dict_self in self.coord_geoms.items():
cg_dict_other = other[mp_symbol]
other_csms_self = cg_dict_self["other_symmetry_measures"]
other_csms_other = cg_dict_other["other_symmetry_measures"]
for csmtype in [
"csm_wcs_ctwcc",
"csm_wcs_ctwocc",
"csm_wcs_csc",
"csm_wocs_ctwcc",
"csm_wocs_ctwocc",
"csm_wocs_csc",
]:
if not np.isclose(
other_csms_self[csmtype],
other_csms_other[csmtype],
rtol=rtol,
atol=atol,
):
return False
return True
def __eq__(self, other):
"""
Equality method that checks if the ChemicalEnvironments object is equal to another ChemicalEnvironments.
object.
Args:
other: ChemicalEnvironments object to compare with.
Returns:
True if both objects are equal, False otherwise.
"""
if set(self.coord_geoms.keys()) != set(other.coord_geoms.keys()):
return False
for mp_symbol, cg_dict_self in self.coord_geoms.items():
cg_dict_other = other.coord_geoms[mp_symbol]
if cg_dict_self["symmetry_measure"] != cg_dict_other["symmetry_measure"]:
return False
if cg_dict_self["algo"] != cg_dict_other["algo"]:
return False
if cg_dict_self["permutation"] != cg_dict_other["permutation"]:
return False
if cg_dict_self["detailed_voronoi_index"] != cg_dict_other["detailed_voronoi_index"]:
return False
other_csms_self = cg_dict_self["other_symmetry_measures"]
other_csms_other = cg_dict_other["other_symmetry_measures"]
for csmtype in [
"csm_wcs_ctwcc",
"csm_wcs_ctwocc",
"csm_wcs_csc",
"csm_wocs_ctwcc",
"csm_wocs_ctwocc",
"csm_wocs_csc",
]:
if other_csms_self[csmtype] != other_csms_other[csmtype]:
return False
return True
def __ne__(self, other):
return not self == other
def as_dict(self):
"""
Returns a dictionary representation of the ChemicalEnvironments object.
Returns:
A dictionary representation of the ChemicalEnvironments object.
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"coord_geoms": jsanitize(self.coord_geoms),
}
@classmethod
def from_dict(cls, d):
"""
Reconstructs the ChemicalEnvironments object from a dict representation of the ChemicalEnvironments created
using the as_dict method.
Args:
d: dict representation of the ChemicalEnvironments object.
Returns:
ChemicalEnvironments object.
"""
ce = cls()
for cg in d["coord_geoms"].keys():
if d["coord_geoms"][cg]["local2perfect_map"] is None:
l2p_map = None
else:
l2p_map = {int(key): int(val) for key, val in d["coord_geoms"][cg]["local2perfect_map"].items()}
if d["coord_geoms"][cg]["perfect2local_map"] is None:
p2l_map = None
else:
p2l_map = {int(key): int(val) for key, val in d["coord_geoms"][cg]["perfect2local_map"].items()}
if (
"other_symmetry_measures" in d["coord_geoms"][cg]
and d["coord_geoms"][cg]["other_symmetry_measures"] is not None
):
other_csms = d["coord_geoms"][cg]["other_symmetry_measures"]
else:
other_csms = None
ce.add_coord_geom(
cg,
d["coord_geoms"][cg]["symmetry_measure"],
d["coord_geoms"][cg]["algo"],
permutation=d["coord_geoms"][cg]["permutation"],
local2perfect_map=l2p_map,
perfect2local_map=p2l_map,
detailed_voronoi_index=d["coord_geoms"][cg]["detailed_voronoi_index"],
other_symmetry_measures=other_csms,
rotation_matrix=d["coord_geoms"][cg]["rotation_matrix"],
scaling_factor=d["coord_geoms"][cg]["scaling_factor"],
)
return ce
| materialsproject/pymatgen | pymatgen/analysis/chemenv/coordination_environments/structure_environments.py | Python | mit | 104,682 | [
"pymatgen"
] | 9fe8aeb5c8996e32d9174bc1b72493819934853439d963bc176d1850d8471015 |
# Blob-detection demo: load an image, detect blobs with Laplacian of Gaussian,
# and draw the detections on top of the original image.
from skimage import feature, io, filters, transform
# Hard-coded input image path (relative to the script's working directory).
img_file = "../data/similar_objects/4.JPG"
# load the image and convert it to a floating point data type
image = io.imread(img_file)
# Downscale to 40% to speed up blob detection.
image = transform.rescale(image, 0.4)
from matplotlib import pyplot as plt
from skimage import data
from skimage.feature import blob_dog, blob_log, blob_doh
from math import sqrt
from skimage.color import rgb2gray
# Blob detectors operate on a single-channel image.
image_gray = rgb2gray(image)
# Show the grayscale input (blocks until the window is closed).
plt.imshow(image_gray)
plt.show()
# Laplacian-of-Gaussian blob detection; rows are (y, x, sigma).
blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1)
# Compute radii in the 3rd column.
blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)
#blobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01)
# Only the LoG detector is plotted; DoG/DoH variants are left disabled above.
blobs_list = [blobs_log]
colors = ['yellow']
titles = ['Laplacian of Gaussian']
sequence = zip(blobs_list, colors, titles)
# NOTE(review): 'box-forced' is a deprecated Matplotlib adjustable value in
# newer releases -- confirm the installed version still accepts it.
fig,axes = plt.subplots(1, 2, sharex=True, sharey=True, subplot_kw={'adjustable':'box-forced'})
axes = axes.ravel()
# Consume one subplot axis per detector result (second axis stays unused here).
for blobs, color, title in sequence:
    ax = axes[0]
    axes = axes[1:]
    ax.set_title(title)
    ax.imshow(image, interpolation='nearest')
    # Draw a small fixed-radius circle at each detected blob center.
    for blob in blobs:
        y, x, r = blob
        c = plt.Circle((x, y), 2, color=color, linewidth=2, fill=False)
        ax.add_patch(c)
plt.show()
"Gaussian"
] | a16c9e6708596e3ad79f45a1e205f3d094595959ba6b99a45fbdd6e3ddb6dc0e |
import sys
import os
import pysam
import difflib
IS_PYTHON3 = sys.version_info[0] >= 3
if IS_PYTHON3:
from itertools import zip_longest
from urllib.request import urlopen
else:
from itertools import izip as zip_longest
from urllib2 import urlopen
def checkBinaryEqual(filename1, filename2):
    '''return true if the two files are binary equal.

    A cheap size comparison is done first; only files of equal size
    are compared content-wise.
    '''
    if os.path.getsize(filename1) != os.path.getsize(filename2):
        return False

    # Compare in fixed-size chunks instead of byte-by-byte, and use
    # context managers so the handles are closed even if reading raises
    # (the original leaked both handles on error).
    with open(filename1, "rb") as infile1, open(filename2, "rb") as infile2:
        while True:
            chunk1 = infile1.read(65536)
            chunk2 = infile2.read(65536)
            if chunk1 != chunk2:
                return False
            if not chunk1:
                # Both files exhausted without a mismatch.
                return True
def checkSamtoolsViewEqual(filename1, filename2,
                           without_header=False):
    '''return true if the two files are equal in their
    content through samtools view.

    Each alignment line's fields are compared as sorted sets, because
    optional tags get rearranged between BAM and CRAM.
    '''

    # strip MD and NM tags, as not preserved in CRAM files
    args = ["-x", "MD", "-x", "NM"]
    if not without_header:
        args.append("-h")

    lines1 = pysam.view(*(args + [filename1]))
    lines2 = pysam.view(*(args + [filename2]))

    if len(lines1) != len(lines2):
        return False

    if lines1 != lines2:
        # line by line comparison
        # sort each line, as tags get rearranged between
        # BAM/CRAM
        for n, pair in enumerate(zip(lines1, lines2)):
            l1, l2 = pair
            l1 = sorted(l1[:-1].split("\t"))
            l2 = sorted(l2[:-1].split("\t"))
            if l1 != l2:
                # BUG FIX: these were Python 2 'print' statements, a
                # SyntaxError under Python 3 even though this module
                # otherwise supports both (see the IS_PYTHON3 handling).
                print("mismatch in line %i" % n)
                print(l1)
                print(l2)
                return False
    else:
        # NOTE(review): this 'else' pairs with 'if lines1 != lines2', so
        # byte-identical outputs return False -- that looks inverted, but the
        # structure is preserved here; confirm the intended semantics.
        return False

    return True
def checkURL(url):
    '''return True if URL is available.

    A URL might not be available if it is the wrong URL
    or there is no connection to the URL.
    '''
    try:
        urlopen(url, timeout=1)
        return True
    except Exception:
        # BUG FIX: narrowed from a bare 'except:' so KeyboardInterrupt and
        # SystemExit are no longer swallowed; any urlopen failure (bad URL,
        # network error, timeout) still reports the URL as unavailable.
        return False
def checkFieldEqual(cls, read1, read2, exclude=[]):
    '''check if two reads are equal by comparing each field.

    Fields listed in *exclude* (without the leading dot) are skipped.
    '''
    # Field names carry a leading "." for refactoring purposes; it is
    # stripped before the attribute lookup.
    dotted_fields = (
        ".query_name",
        ".query_sequence",
        ".flag",
        ".reference_id",
        ".reference_start",
        ".mapping_quality",
        ".cigartuples",
        ".next_reference_id",
        ".next_reference_start",
        ".template_length",
        ".query_length",
        ".query_qualities",
        ".bin",
        ".is_paired", ".is_proper_pair",
        ".is_unmapped", ".mate_is_unmapped",
        ".is_reverse", ".mate_is_reverse",
        ".is_read1", ".is_read2",
        ".is_secondary", ".is_qcfail",
        ".is_duplicate",
    )
    for dotted in dotted_fields:
        field = dotted[1:]
        if field in exclude:
            continue
        value1 = getattr(read1, field)
        value2 = getattr(read2, field)
        cls.assertEqual(value1, value2,
                        "attribute mismatch for %s: %s != %s" %
                        (field, value1, value2))
| nlhepler/pysam | tests/TestUtils.py | Python | mit | 3,260 | [
"pysam"
] | ab7e83d06c716e728048ce5deb45dcadc206210bc0de5d5c48fa7862aed6b7e8 |
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Variable_Stiffness_Variable_Velocity/')
from data_variable_hshv4 import Fmat_original_hshv
from data_variable_hslv4 import Fmat_original_hslv
from data_variable_lshv4 import Fmat_original_lshv
from data_variable_lslv4 import Fmat_original_lslv
# Scaling function
def scaling(mat):
Fvec_a = mat[0:201,0:]
Fvec_b = mat[201:402,0:]
Fvec_c = mat[402:603,0:]
# With Scaling
max_a = np.max(abs(Fvec_a))
min_a = np.min(abs(Fvec_a))
mean_a = np.mean(Fvec_a)
std_a = np.std(Fvec_a)
#Fvec_a = (Fvec_a)/max_a
#Fvec_a = (Fvec_a-mean_a)
#Fvec_a = (Fvec_a-mean_a)/max_a
Fvec_a = (Fvec_a-mean_a)/std_a
# With Scaling
max_b = np.max(abs(Fvec_b))
min_b = np.min(abs(Fvec_b))
mean_b = np.mean(Fvec_b)
std_b = np.std(Fvec_b)
#Fvec_b = (Fvec_b)/max_b
#Fvec_b = (Fvec_b-mean_b)
#Fvec_b = (Fvec_b-mean_b)/max_b
#Fvec_b = (Fvec_b-mean_b)/std_b
# With Scaling
max_c = np.max(abs(Fvec_c))
min_c = np.min(abs(Fvec_c))
mean_c = np.mean(Fvec_c)
std_c = np.std(Fvec_c)
#Fvec_c = (Fvec_c)/max_c
#Fvec_c = (Fvec_c-mean_c)
#Fvec_c = (Fvec_c-mean_c)/max_c
Fvec_c = (Fvec_c-mean_c)/std_c
#Fvec_c = Fvec_c*np.max((max_a,max_b))/max_c
Fvec = np.row_stack([Fvec_a,Fvec_b,Fvec_c])
n_Fvec, m_Fvec = np.shape(Fvec)
#print 'Feature_Vector_Shape:',n_Fvec, m_Fvec
return Fvec
# Returns mu,sigma for 10 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models
def feature_to_mu_cov(fvec1,fvec2):
index = 0
m,n = np.shape(fvec1)
#print m,n
mu_1 = np.zeros((10,1))
mu_2 = np.zeros((10,1))
cov = np.zeros((10,2,2))
DIVS = m/10
while (index < 10):
m_init = index*DIVS
temp_fvec1 = fvec1[(m_init):(m_init+DIVS),0:]
temp_fvec2 = fvec2[(m_init):(m_init+DIVS),0:]
temp_fvec1 = np.reshape(temp_fvec1,DIVS*n)
temp_fvec2 = np.reshape(temp_fvec2,DIVS*n)
mu_1[index] = np.mean(temp_fvec1)
mu_2[index] = np.mean(temp_fvec2)
cov[index,:,:] = np.cov(np.concatenate((temp_fvec1,temp_fvec2),axis=0))
if index == 0:
print 'mean = ', mu_2[index]
print 'mean = ', scp.mean(fvec2[(m_init):(m_init+DIVS),0:])
print np.shape(np.concatenate((temp_fvec1,temp_fvec2),axis=0))
print cov[index,:,:]
print scp.std(fvec2[(m_init):(m_init+DIVS),0:])
print scp.std(temp_fvec2)
index = index+1
return mu_1,mu_2,cov
if __name__ == '__main__':
# Scaling wrt all data
Fmat_rf_hshv = scaling(Fmat_original_hshv[:,0:15])
Fmat_rm_hshv = Fmat_original_hshv[:,15:15]
Fmat_sf_hshv = scaling(Fmat_original_hshv[:,15:25])
Fmat_sm_hshv = Fmat_original_hshv[:,25:25]
Fmat_hshv = np.matrix(np.column_stack((Fmat_rf_hshv,Fmat_rm_hshv,Fmat_sf_hshv,Fmat_sm_hshv)))
Fmat_rf_hslv = scaling(Fmat_original_hslv[:,0:15])
Fmat_rm_hslv = scaling(Fmat_original_hslv[:,15:29])
Fmat_sf_hslv = scaling(Fmat_original_hslv[:,29:44])
Fmat_sm_hslv = scaling(Fmat_original_hslv[:,44:49])
Fmat_hslv = np.matrix(np.column_stack((Fmat_rf_hslv,Fmat_rm_hslv,Fmat_sf_hslv,Fmat_sm_hslv)))
Fmat_rf_lshv = scaling(Fmat_original_lshv[:,0:15])
Fmat_rm_lshv = Fmat_original_lshv[:,15:15]
Fmat_sf_lshv = scaling(Fmat_original_lshv[:,15:20])
Fmat_sm_lshv = scaling(Fmat_original_lshv[:,20:25])
Fmat_lshv = np.matrix(np.column_stack((Fmat_rf_lshv,Fmat_rm_lshv,Fmat_sf_lshv,Fmat_sm_lshv)))
Fmat_rf_lslv = scaling(Fmat_original_lslv[:,0:15])
Fmat_rm_lslv = scaling(Fmat_original_lslv[:,15:24])
Fmat_sf_lslv = scaling(Fmat_original_lslv[:,24:31])
Fmat_sm_lslv = scaling(Fmat_original_lslv[:,31:36])
Fmat_lslv = np.matrix(np.column_stack((Fmat_rf_lslv,Fmat_rm_lslv,Fmat_sf_lslv,Fmat_sm_lslv)))
Fmat = np.matrix(np.column_stack((Fmat_hshv,Fmat_hslv,Fmat_lshv,Fmat_lslv)))
# HMM - Implementation:
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# pi - initial probabilities per state
pi = [0.1] * 10
# Confusion Matrix
cmat = np.zeros((4,4))
#############################################################################################################################################
# HSHV as testing set and Rest as training set
# Checking the Data-Matrix
mu_rf_force_hshv,mu_rf_motion_hshv,cov_rf_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:201,0:15], Fmat_lshv[0:201,0:15], Fmat_lslv[0:201,0:15])))), (np.matrix(np.column_stack((Fmat_hslv[402:603,0:15], Fmat_lshv[402:603,0:15], Fmat_lslv[402:603,0:15])))))
mu_rm_force_hshv,mu_rm_motion_hshv,cov_rm_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:201,15:29], Fmat_lshv[0:201,15:15], Fmat_lslv[0:201,15:24])))), (np.matrix(np.column_stack((Fmat_hslv[402:603,15:29], Fmat_lshv[402:603,15:15], Fmat_lslv[402:603,15:24])))))
mu_sf_force_hshv,mu_sf_motion_hshv,cov_sf_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:201,29:44], Fmat_lshv[0:201,15:20], Fmat_lslv[0:201,24:31])))), (np.matrix(np.column_stack((Fmat_hslv[402:603,29:44], Fmat_lshv[402:603,15:20], Fmat_lslv[402:603,24:31])))))
mu_sm_force_hshv,mu_sm_motion_hshv,cov_sm_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:201,44:49], Fmat_lshv[0:201,20:25], Fmat_lslv[0:201,31:36])))), (np.matrix(np.column_stack((Fmat_hslv[402:603,44:49], Fmat_lshv[402:603,20:25], Fmat_lslv[402:603,31:36])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_hshv = [0.0]*10
B_rm_hshv = [0.0]*10
B_sf_hshv = [0.0]*10
B_sm_hshv = [0.0]*10
for num_states in range(10):
B_rf_hshv[num_states] = [[mu_rf_force_hshv[num_states][0],mu_rf_motion_hshv[num_states][0]],[cov_rf_hshv[num_states][0][0],cov_rf_hshv[num_states][0][1],cov_rf_hshv[num_states][1][0],cov_rf_hshv[num_states][1][1]]]
B_rm_hshv[num_states] = [[mu_rm_force_hshv[num_states][0],mu_rm_motion_hshv[num_states][0]],[cov_rm_hshv[num_states][0][0],cov_rm_hshv[num_states][0][1],cov_rm_hshv[num_states][1][0],cov_rm_hshv[num_states][1][1]]]
B_sf_hshv[num_states] = [[mu_sf_force_hshv[num_states][0],mu_sf_motion_hshv[num_states][0]],[cov_sf_hshv[num_states][0][0],cov_sf_hshv[num_states][0][1],cov_sf_hshv[num_states][1][0],cov_sf_hshv[num_states][1][1]]]
B_sm_hshv[num_states] = [[mu_sm_force_hshv[num_states][0],mu_sm_motion_hshv[num_states][0]],[cov_sm_hshv[num_states][0][0],cov_sm_hshv[num_states][0][1],cov_sm_hshv[num_states][1][0],cov_sm_hshv[num_states][1][1]]]
print cov_sm_hshv[num_states][0][0],cov_sm_hshv[num_states][0][1],cov_sm_hshv[num_states][1][0],cov_sm_hshv[num_states][1][1]
print "----"
#print B_sm_hshv
#print mu_sm_motion_hshv
# generate RF, RM, SF, SM models from parameters
model_rf_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_hshv, pi) # Will be Trained
model_rm_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_hshv, pi) # Will be Trained
model_sf_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_hshv, pi) # Will be Trained
model_sm_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_hshv, pi) # Will be Trained
# For Training
total_seq_rf_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:201,0:15], Fmat_lshv[0:201,0:15], Fmat_lslv[0:201,0:15])))
total_seq_rm_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:201,15:29], Fmat_lshv[0:201,15:15], Fmat_lslv[0:201,15:24])))
total_seq_sf_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:201,29:44], Fmat_lshv[0:201,15:20], Fmat_lslv[0:201,24:31])))
total_seq_sm_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:201,44:49], Fmat_lshv[0:201,20:25], Fmat_lslv[0:201,31:36])))
total_seq_rf_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[402:603,0:15], Fmat_lshv[402:603,0:15], Fmat_lslv[402:603,0:15])))
total_seq_rm_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[402:603,15:29], Fmat_lshv[402:603,15:15], Fmat_lslv[402:603,15:24])))
total_seq_sf_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[402:603,29:44], Fmat_lshv[402:603,15:20], Fmat_lslv[402:603,24:31])))
total_seq_sm_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[402:603,44:49], Fmat_lshv[402:603,20:25], Fmat_lslv[402:603,31:36])))
total_seq_rf_hshv = np.zeros((402,45))
total_seq_rm_hshv = np.zeros((402,23))
total_seq_sf_hshv = np.zeros((402,27))
total_seq_sm_hshv = np.zeros((402,15))
i = 0
j = 0
while i < 402:
total_seq_rf_hshv[i] = total_seq_rf_force_hshv[j]
total_seq_rf_hshv[i+1] = total_seq_rf_motion_hshv[j]
total_seq_rm_hshv[i] = total_seq_rm_force_hshv[j]
total_seq_rm_hshv[i+1] = total_seq_rm_motion_hshv[j]
total_seq_sf_hshv[i] = total_seq_sf_force_hshv[j]
total_seq_sf_hshv[i+1] = total_seq_sf_motion_hshv[j]
total_seq_sm_hshv[i] = total_seq_sm_force_hshv[j]
total_seq_sm_hshv[i+1] = total_seq_sm_motion_hshv[j]
j=j+1
i=i+2
train_seq_rf_hshv = (np.array(total_seq_rf_hshv).T).tolist()
train_seq_rm_hshv = (np.array(total_seq_rm_hshv).T).tolist()
train_seq_sf_hshv = (np.array(total_seq_sf_hshv).T).tolist()
train_seq_sm_hshv = (np.array(total_seq_sm_hshv).T).tolist()
#print train_seq_rf_hshv
final_ts_rf_hshv = ghmm.SequenceSet(F,train_seq_rf_hshv)
final_ts_rm_hshv = ghmm.SequenceSet(F,train_seq_rm_hshv)
final_ts_sf_hshv = ghmm.SequenceSet(F,train_seq_sf_hshv)
final_ts_sm_hshv = ghmm.SequenceSet(F,train_seq_sm_hshv)
model_rf_hshv.baumWelch(final_ts_rf_hshv)
model_rm_hshv.baumWelch(final_ts_rm_hshv)
model_sf_hshv.baumWelch(final_ts_sf_hshv)
model_sm_hshv.baumWelch(final_ts_sm_hshv)
# For Testing
total_seq_obj_hshv = np.zeros((402,25))
total_seq_obj_force_hshv = Fmat_hshv[0:201,:]
total_seq_obj_motion_hshv = Fmat_hshv[402:603,:]
i = 0
j = 0
while i < 402:
total_seq_obj_hshv[i] = total_seq_obj_force_hshv[j]
total_seq_obj_hshv[i+1] = total_seq_obj_motion_hshv[j]
j=j+1
i=i+2
rf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
rm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
k = 0
while (k < np.size(total_seq_obj_hshv,1)):
test_seq_obj_hshv = (np.array(total_seq_obj_hshv[:,k]).T).tolist()
new_test_seq_obj_hshv = np.array(test_seq_obj_hshv)
#print new_test_seq_obj_hshv
ts_obj_hshv = new_test_seq_obj_hshv
#print np.shape(ts_obj_hshv)
final_ts_obj_hshv = ghmm.EmissionSequence(F,ts_obj_hshv.tolist())
# Find Viterbi Path
path_rf_obj_hshv = model_rf_hshv.viterbi(final_ts_obj_hshv)
path_rm_obj_hshv = model_rm_hshv.viterbi(final_ts_obj_hshv)
path_sf_obj_hshv = model_sf_hshv.viterbi(final_ts_obj_hshv)
path_sm_obj_hshv = model_sm_hshv.viterbi(final_ts_obj_hshv)
obj_hshv = max(path_rf_obj_hshv[1],path_rm_obj_hshv[1],path_sf_obj_hshv[1],path_sm_obj_hshv[1])
if obj_hshv == path_rf_obj_hshv[1]:
rf_hshv[0,k] = 1
elif obj_hshv == path_rm_obj_hshv[1]:
rm_hshv[0,k] = 1
elif obj_hshv == path_sf_obj_hshv[1]:
sf_hshv[0,k] = 1
else:
sm_hshv[0,k] = 1
k = k+1
#print rf_hshv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_hshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hshv[0,15:15])
cmat[0][2] = cmat[0][2] + np.sum(rf_hshv[0,15:25])
cmat[0][3] = cmat[0][3] + np.sum(rf_hshv[0,25:25])
cmat[1][0] = cmat[1][0] + np.sum(rm_hshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hshv[0,15:15])
cmat[1][2] = cmat[1][2] + np.sum(rm_hshv[0,15:25])
cmat[1][3] = cmat[1][3] + np.sum(rm_hshv[0,25:25])
cmat[2][0] = cmat[2][0] + np.sum(sf_hshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hshv[0,15:15])
cmat[2][2] = cmat[2][2] + np.sum(sf_hshv[0,15:25])
cmat[2][3] = cmat[2][3] + np.sum(sf_hshv[0,25:25])
cmat[3][0] = cmat[3][0] + np.sum(sm_hshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hshv[0,15:15])
cmat[3][2] = cmat[3][2] + np.sum(sm_hshv[0,15:25])
cmat[3][3] = cmat[3][3] + np.sum(sm_hshv[0,25:25])
#print cmat
#############################################################################################################################################
# HSLV as testing set and Rest as training set
mu_rf_force_hslv,mu_rf_motion_hslv,cov_rf_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,0:15], Fmat_lshv[0:201,0:15], Fmat_lslv[0:201,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,0:15], Fmat_lshv[402:603,0:15], Fmat_lslv[402:603,0:15])))))
mu_rm_force_hslv,mu_rm_motion_hslv,cov_rm_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,15:15], Fmat_lshv[0:201,15:15], Fmat_lslv[0:201,15:24])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,15:15], Fmat_lshv[402:603,15:15], Fmat_lslv[402:603,15:24])))))
mu_sf_force_hslv,mu_sf_motion_hslv,cov_sf_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,15:25], Fmat_lshv[0:201,15:20], Fmat_lslv[0:201,24:31])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,15:25], Fmat_lshv[402:603,15:20], Fmat_lslv[402:603,24:31])))))
mu_sm_force_hslv,mu_sm_motion_hslv,cov_sm_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,25:25], Fmat_lshv[0:201,20:25], Fmat_lslv[0:201,31:36])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,25:25], Fmat_lshv[402:603,20:25], Fmat_lslv[402:603,31:36])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_hslv = [0.0]*10
B_rm_hslv = [0.0]*10
B_sf_hslv = [0.0]*10
B_sm_hslv = [0.0]*10
for num_states in range(10):
B_rf_hslv[num_states] = [[mu_rf_force_hslv[num_states][0],mu_rf_motion_hslv[num_states][0]],[cov_rf_hslv[num_states][0][0],cov_rf_hslv[num_states][0][1],cov_rf_hslv[num_states][1][0],cov_rf_hslv[num_states][1][1]]]
B_rm_hslv[num_states] = [[mu_rm_force_hslv[num_states][0],mu_rm_motion_hslv[num_states][0]],[cov_rm_hslv[num_states][0][0],cov_rm_hslv[num_states][0][1],cov_rm_hslv[num_states][1][0],cov_rm_hslv[num_states][1][1]]]
B_sf_hslv[num_states] = [[mu_sf_force_hslv[num_states][0],mu_sf_motion_hslv[num_states][0]],[cov_sf_hslv[num_states][0][0],cov_sf_hslv[num_states][0][1],cov_sf_hslv[num_states][1][0],cov_sf_hslv[num_states][1][1]]]
B_sm_hslv[num_states] = [[mu_sm_force_hslv[num_states][0],mu_sm_motion_hslv[num_states][0]],[cov_sm_hslv[num_states][0][0],cov_sm_hslv[num_states][0][1],cov_sm_hslv[num_states][1][0],cov_sm_hslv[num_states][1][1]]]
print cov_sm_hslv[num_states][0][0],cov_sm_hslv[num_states][0][1],cov_sm_hslv[num_states][1][0],cov_sm_hslv[num_states][1][1]
print "----"
#print B_sm_hslv
#print mu_sm_motion_hslv
# generate RF, RM, SF, SM models from parameters
model_rf_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_hslv, pi) # Will be Trained
model_rm_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_hslv, pi) # Will be Trained
model_sf_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_hslv, pi) # Will be Trained
model_sm_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_hslv, pi) # Will be Trained
# For Training
total_seq_rf_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:201,0:15], Fmat_lshv[0:201,0:15], Fmat_lslv[0:201,0:15])))
total_seq_rm_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:201,15:15], Fmat_lshv[0:201,15:15], Fmat_lslv[0:201,15:24])))
total_seq_sf_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:201,15:25], Fmat_lshv[0:201,15:20], Fmat_lslv[0:201,24:31])))
total_seq_sm_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:201,25:25], Fmat_lshv[0:201,20:25], Fmat_lslv[0:201,31:36])))
total_seq_rf_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[402:603,0:15], Fmat_lshv[402:603,0:15], Fmat_lslv[402:603,0:15])))
total_seq_rm_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[402:603,15:15], Fmat_lshv[402:603,15:15], Fmat_lslv[402:603,15:24])))
total_seq_sf_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[402:603,15:25], Fmat_lshv[402:603,15:20], Fmat_lslv[402:603,24:31])))
total_seq_sm_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[402:603,25:25], Fmat_lshv[402:603,20:25], Fmat_lslv[402:603,31:36])))
total_seq_rf_hslv = np.zeros((402,45))
total_seq_rm_hslv = np.zeros((402,9))
total_seq_sf_hslv = np.zeros((402,22))
total_seq_sm_hslv = np.zeros((402,10))
i = 0
j = 0
while i < 402:
total_seq_rf_hslv[i] = total_seq_rf_force_hslv[j]
total_seq_rf_hslv[i+1] = total_seq_rf_motion_hslv[j]
total_seq_rm_hslv[i] = total_seq_rm_force_hslv[j]
total_seq_rm_hslv[i+1] = total_seq_rm_motion_hslv[j]
total_seq_sf_hslv[i] = total_seq_sf_force_hslv[j]
total_seq_sf_hslv[i+1] = total_seq_sf_motion_hslv[j]
total_seq_sm_hslv[i] = total_seq_sm_force_hslv[j]
total_seq_sm_hslv[i+1] = total_seq_sm_motion_hslv[j]
j=j+1
i=i+2
train_seq_rf_hslv = (np.array(total_seq_rf_hslv).T).tolist()
train_seq_rm_hslv = (np.array(total_seq_rm_hslv).T).tolist()
train_seq_sf_hslv = (np.array(total_seq_sf_hslv).T).tolist()
train_seq_sm_hslv = (np.array(total_seq_sm_hslv).T).tolist()
#print train_seq_rf_hslv
final_ts_rf_hslv = ghmm.SequenceSet(F,train_seq_rf_hslv)
final_ts_rm_hslv = ghmm.SequenceSet(F,train_seq_rm_hslv)
final_ts_sf_hslv = ghmm.SequenceSet(F,train_seq_sf_hslv)
final_ts_sm_hslv = ghmm.SequenceSet(F,train_seq_sm_hslv)
model_rf_hslv.baumWelch(final_ts_rf_hslv)
model_rm_hslv.baumWelch(final_ts_rm_hslv)
model_sf_hslv.baumWelch(final_ts_sf_hslv)
model_sm_hslv.baumWelch(final_ts_sm_hslv)
# For Testing
total_seq_obj_hslv = np.zeros((402,49))
total_seq_obj_force_hslv = Fmat_hslv[0:201,:]
total_seq_obj_motion_hslv = Fmat_hslv[402:603,:]
i = 0
j = 0
while i < 402:
total_seq_obj_hslv[i] = total_seq_obj_force_hslv[j]
total_seq_obj_hslv[i+1] = total_seq_obj_motion_hslv[j]
j=j+1
i=i+2
rf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
rm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
k = 0
while (k < np.size(total_seq_obj_hslv,1)):
test_seq_obj_hslv = (np.array(total_seq_obj_hslv[:,k]).T).tolist()
new_test_seq_obj_hslv = np.array(test_seq_obj_hslv)
#print new_test_seq_obj_hslv
ts_obj_hslv = new_test_seq_obj_hslv
#print np.shape(ts_obj_hslv)
final_ts_obj_hslv = ghmm.EmissionSequence(F,ts_obj_hslv.tolist())
# Find Viterbi Path
path_rf_obj_hslv = model_rf_hslv.viterbi(final_ts_obj_hslv)
path_rm_obj_hslv = model_rm_hslv.viterbi(final_ts_obj_hslv)
path_sf_obj_hslv = model_sf_hslv.viterbi(final_ts_obj_hslv)
path_sm_obj_hslv = model_sm_hslv.viterbi(final_ts_obj_hslv)
obj_hslv = max(path_rf_obj_hslv[1],path_rm_obj_hslv[1],path_sf_obj_hslv[1],path_sm_obj_hslv[1])
if obj_hslv == path_rf_obj_hslv[1]:
rf_hslv[0,k] = 1
elif obj_hslv == path_rm_obj_hslv[1]:
rm_hslv[0,k] = 1
elif obj_hslv == path_sf_obj_hslv[1]:
sf_hslv[0,k] = 1
else:
sm_hslv[0,k] = 1
k = k+1
# Fold the HSLV predictions into the running 4x4 confusion matrix.
# Column ranges give each trial's true class: rf 0:15, rm 15:29,
# sf 29:44, sm 44:49.
_hslv_preds = (rf_hslv, rm_hslv, sf_hslv, sm_hslv)
_hslv_true_ranges = ((0, 15), (15, 29), (29, 44), (44, 49))
for _row, _pred in enumerate(_hslv_preds):
    for _col, (_lo, _hi) in enumerate(_hslv_true_ranges):
        cmat[_row][_col] = cmat[_row][_col] + np.sum(_pred[0, _lo:_hi])
############################################################################################################################################
# LSHV as testing set and Rest as training set
mu_rf_force_lshv,mu_rf_motion_lshv,cov_rf_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,0:15], Fmat_hslv[0:201,0:15], Fmat_lslv[0:201,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,0:15], Fmat_hslv[402:603,0:15], Fmat_lslv[402:603,0:15])))))
mu_rm_force_lshv,mu_rm_motion_lshv,cov_rm_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,15:15], Fmat_hslv[0:201,15:29], Fmat_lslv[0:201,15:24])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,15:15], Fmat_hslv[402:603,15:29], Fmat_lslv[402:603,15:24])))))
mu_sf_force_lshv,mu_sf_motion_lshv,cov_sf_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,15:25], Fmat_hslv[0:201,29:44], Fmat_lslv[0:201,24:31])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,15:25], Fmat_hslv[402:603,29:44], Fmat_lslv[402:603,24:31])))))
mu_sm_force_lshv,mu_sm_motion_lshv,cov_sm_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,25:25], Fmat_hslv[0:201,44:49], Fmat_lslv[0:201,31:36])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,25:25], Fmat_hslv[402:603,44:49], Fmat_lslv[402:603,31:36])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_lshv = [0.0]*10
B_rm_lshv = [0.0]*10
B_sf_lshv = [0.0]*10
B_sm_lshv = [0.0]*10
for num_states in range(10):
B_rf_lshv[num_states] = [[mu_rf_force_lshv[num_states][0],mu_rf_motion_lshv[num_states][0]],[cov_rf_lshv[num_states][0][0],cov_rf_lshv[num_states][0][1],cov_rf_lshv[num_states][1][0],cov_rf_lshv[num_states][1][1]]]
B_rm_lshv[num_states] = [[mu_rm_force_lshv[num_states][0],mu_rm_motion_lshv[num_states][0]],[cov_rm_lshv[num_states][0][0],cov_rm_lshv[num_states][0][1],cov_rm_lshv[num_states][1][0],cov_rm_lshv[num_states][1][1]]]
B_sf_lshv[num_states] = [[mu_sf_force_lshv[num_states][0],mu_sf_motion_lshv[num_states][0]],[cov_sf_lshv[num_states][0][0],cov_sf_lshv[num_states][0][1],cov_sf_lshv[num_states][1][0],cov_sf_lshv[num_states][1][1]]]
B_sm_lshv[num_states] = [[mu_sm_force_lshv[num_states][0],mu_sm_motion_lshv[num_states][0]],[cov_sm_lshv[num_states][0][0],cov_sm_lshv[num_states][0][1],cov_sm_lshv[num_states][1][0],cov_sm_lshv[num_states][1][1]]]
print cov_sm_lshv[num_states][0][0],cov_sm_lshv[num_states][0][1],cov_sm_lshv[num_states][1][0],cov_sm_lshv[num_states][1][1]
print "----"
#print B_sm_lshv
#print mu_sm_motion_lshv
# Instantiate one 10-state HMM per class from the shared transition matrix A,
# prior pi, and the emission parameters estimated above.
model_rf_lshv = ghmm.HMMFromMatrices(F, ghmm.MultivariateGaussianDistribution(F), A, B_rf_lshv, pi)  # trained below
model_rm_lshv = ghmm.HMMFromMatrices(F, ghmm.MultivariateGaussianDistribution(F), A, B_rm_lshv, pi)  # trained below
model_sf_lshv = ghmm.HMMFromMatrices(F, ghmm.MultivariateGaussianDistribution(F), A, B_sf_lshv, pi)  # trained below
model_sm_lshv = ghmm.HMMFromMatrices(F, ghmm.MultivariateGaussianDistribution(F), A, B_sm_lshv, pi)  # trained below
# Per-class training data: force samples (rows 0:201) and the matching motion
# samples (rows 402:603) from the three training categories.
total_seq_rf_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:201,0:15], Fmat_hslv[0:201,0:15], Fmat_lslv[0:201,0:15])))
total_seq_rm_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:201,15:15], Fmat_hslv[0:201,15:29], Fmat_lslv[0:201,15:24])))
total_seq_sf_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:201,15:25], Fmat_hslv[0:201,29:44], Fmat_lslv[0:201,24:31])))
total_seq_sm_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:201,25:25], Fmat_hslv[0:201,44:49], Fmat_lslv[0:201,31:36])))
total_seq_rf_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[402:603,0:15], Fmat_hslv[402:603,0:15], Fmat_lslv[402:603,0:15])))
total_seq_rm_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[402:603,15:15], Fmat_hslv[402:603,15:29], Fmat_lslv[402:603,15:24])))
total_seq_sf_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[402:603,15:25], Fmat_hslv[402:603,29:44], Fmat_lslv[402:603,24:31])))
total_seq_sm_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[402:603,25:25], Fmat_hslv[402:603,44:49], Fmat_lslv[402:603,31:36])))
# Interleave: even rows carry force sample j, odd rows the motion sample j.
# Column counts are class trial totals (rf 15+15+15=45, rm 0+14+9=23, ...).
total_seq_rf_lshv = np.zeros((402, 45))
total_seq_rm_lshv = np.zeros((402, 23))
total_seq_sf_lshv = np.zeros((402, 32))
total_seq_sm_lshv = np.zeros((402, 10))
for j in range(201):
    total_seq_rf_lshv[2 * j] = total_seq_rf_force_lshv[j]
    total_seq_rf_lshv[2 * j + 1] = total_seq_rf_motion_lshv[j]
    total_seq_rm_lshv[2 * j] = total_seq_rm_force_lshv[j]
    total_seq_rm_lshv[2 * j + 1] = total_seq_rm_motion_lshv[j]
    total_seq_sf_lshv[2 * j] = total_seq_sf_force_lshv[j]
    total_seq_sf_lshv[2 * j + 1] = total_seq_sf_motion_lshv[j]
    total_seq_sm_lshv[2 * j] = total_seq_sm_force_lshv[j]
    total_seq_sm_lshv[2 * j + 1] = total_seq_sm_motion_lshv[j]
# One training sequence per column (trial).
train_seq_rf_lshv = (np.array(total_seq_rf_lshv).T).tolist()
train_seq_rm_lshv = (np.array(total_seq_rm_lshv).T).tolist()
train_seq_sf_lshv = (np.array(total_seq_sf_lshv).T).tolist()
train_seq_sm_lshv = (np.array(total_seq_sm_lshv).T).tolist()
final_ts_rf_lshv = ghmm.SequenceSet(F, train_seq_rf_lshv)
final_ts_rm_lshv = ghmm.SequenceSet(F, train_seq_rm_lshv)
final_ts_sf_lshv = ghmm.SequenceSet(F, train_seq_sf_lshv)
final_ts_sm_lshv = ghmm.SequenceSet(F, train_seq_sm_lshv)
# Baum-Welch re-estimation of each class model on its own trials.
model_rf_lshv.baumWelch(final_ts_rf_lshv)
model_rm_lshv.baumWelch(final_ts_rm_lshv)
model_sf_lshv.baumWelch(final_ts_sf_lshv)
model_sm_lshv.baumWelch(final_ts_sm_lshv)
# For testing: interleave LSHV force (rows 0:201) and motion (rows 402:603)
# samples into a 402-row matrix, one column per held-out trial (25 trials).
total_seq_obj_lshv = np.zeros((402, 25))
total_seq_obj_force_lshv = Fmat_lshv[0:201, :]
total_seq_obj_motion_lshv = Fmat_lshv[402:603, :]
for j in range(201):
    total_seq_obj_lshv[2 * j] = total_seq_obj_force_lshv[j]
    total_seq_obj_lshv[2 * j + 1] = total_seq_obj_motion_lshv[j]
# Classify each held-out LSHV trial by the model with the highest Viterbi
# log-likelihood; record the winner in one-hot row vectors.
n_trials_lshv = np.size(total_seq_obj_lshv, 1)
rf_lshv = np.matrix(np.zeros(n_trials_lshv))
rm_lshv = np.matrix(np.zeros(n_trials_lshv))
sf_lshv = np.matrix(np.zeros(n_trials_lshv))
sm_lshv = np.matrix(np.zeros(n_trials_lshv))
for k in range(n_trials_lshv):
    # Column k is one interleaved force/motion sequence for a single trial.
    trial_lshv = np.array((np.array(total_seq_obj_lshv[:, k]).T).tolist())
    final_ts_obj_lshv = ghmm.EmissionSequence(F, trial_lshv.tolist())
    path_rf_obj_lshv = model_rf_lshv.viterbi(final_ts_obj_lshv)
    path_rm_obj_lshv = model_rm_lshv.viterbi(final_ts_obj_lshv)
    path_sf_obj_lshv = model_sf_lshv.viterbi(final_ts_obj_lshv)
    path_sm_obj_lshv = model_sm_lshv.viterbi(final_ts_obj_lshv)
    best_lshv = max(path_rf_obj_lshv[1], path_rm_obj_lshv[1],
                    path_sf_obj_lshv[1], path_sm_obj_lshv[1])
    # Ties resolve in rf, rm, sf, sm order, matching the original cascade.
    if best_lshv == path_rf_obj_lshv[1]:
        rf_lshv[0, k] = 1
    elif best_lshv == path_rm_obj_lshv[1]:
        rm_lshv[0, k] = 1
    elif best_lshv == path_sf_obj_lshv[1]:
        sf_lshv[0, k] = 1
    else:
        sm_lshv[0, k] = 1
# Fold the LSHV predictions into the running 4x4 confusion matrix.
# True-class column ranges: rf 0:15, rm 15:15 (empty -- presumably no
# rigid-movable trials in LSHV; preserved as-is), sf 15:20, sm 20:25.
_lshv_preds = (rf_lshv, rm_lshv, sf_lshv, sm_lshv)
_lshv_true_ranges = ((0, 15), (15, 15), (15, 20), (20, 25))
for _row, _pred in enumerate(_lshv_preds):
    for _col, (_lo, _hi) in enumerate(_lshv_true_ranges):
        cmat[_row][_col] = cmat[_row][_col] + np.sum(_pred[0, _lo:_hi])
#############################################################################################################################################
# LSLV as testing set and Rest as training set
mu_rf_force_lslv,mu_rf_motion_lslv,cov_rf_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,0:15], Fmat_hslv[0:201,0:15], Fmat_lshv[0:201,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,0:15], Fmat_hslv[402:603,0:15], Fmat_lshv[402:603,0:15])))))
mu_rm_force_lslv,mu_rm_motion_lslv,cov_rm_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,15:15], Fmat_hslv[0:201,15:29], Fmat_lshv[0:201,15:15])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,15:15], Fmat_hslv[402:603,15:29], Fmat_lshv[402:603,15:15])))))
mu_sf_force_lslv,mu_sf_motion_lslv,cov_sf_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,15:25], Fmat_hslv[0:201,29:44], Fmat_lshv[0:201,15:20])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,15:25], Fmat_hslv[402:603,29:44], Fmat_lshv[402:603,15:20])))))
mu_sm_force_lslv,mu_sm_motion_lslv,cov_sm_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,25:25], Fmat_hslv[0:201,44:49], Fmat_lshv[0:201,20:25])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,25:25], Fmat_hslv[402:603,44:49], Fmat_lshv[402:603,20:25])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_lslv = [0.0]*10
B_rm_lslv = [0.0]*10
B_sf_lslv = [0.0]*10
B_sm_lslv = [0.0]*10
for num_states in range(10):
B_rf_lslv[num_states] = [[mu_rf_force_lslv[num_states][0],mu_rf_motion_lslv[num_states][0]],[cov_rf_lslv[num_states][0][0],cov_rf_lslv[num_states][0][1],cov_rf_lslv[num_states][1][0],cov_rf_lslv[num_states][1][1]]]
B_rm_lslv[num_states] = [[mu_rm_force_lslv[num_states][0],mu_rm_motion_lslv[num_states][0]],[cov_rm_lslv[num_states][0][0],cov_rm_lslv[num_states][0][1],cov_rm_lslv[num_states][1][0],cov_rm_lslv[num_states][1][1]]]
B_sf_lslv[num_states] = [[mu_sf_force_lslv[num_states][0],mu_sf_motion_lslv[num_states][0]],[cov_sf_lslv[num_states][0][0],cov_sf_lslv[num_states][0][1],cov_sf_lslv[num_states][1][0],cov_sf_lslv[num_states][1][1]]]
B_sm_lslv[num_states] = [[mu_sm_force_lslv[num_states][0],mu_sm_motion_lslv[num_states][0]],[cov_sm_lslv[num_states][0][0],cov_sm_lslv[num_states][0][1],cov_sm_lslv[num_states][1][0],cov_sm_lslv[num_states][1][1]]]
print cov_sm_lslv[num_states][0][0],cov_sm_lslv[num_states][0][1],cov_sm_lslv[num_states][1][0],cov_sm_lslv[num_states][1][1]
print "----"
#print B_sm_lslv
#print mu_sm_motion_lslv
# Instantiate one 10-state HMM per class from the shared transition matrix A,
# prior pi, and the emission parameters estimated above.
model_rf_lslv = ghmm.HMMFromMatrices(F, ghmm.MultivariateGaussianDistribution(F), A, B_rf_lslv, pi)  # trained below
model_rm_lslv = ghmm.HMMFromMatrices(F, ghmm.MultivariateGaussianDistribution(F), A, B_rm_lslv, pi)  # trained below
model_sf_lslv = ghmm.HMMFromMatrices(F, ghmm.MultivariateGaussianDistribution(F), A, B_sf_lslv, pi)  # trained below
model_sm_lslv = ghmm.HMMFromMatrices(F, ghmm.MultivariateGaussianDistribution(F), A, B_sm_lslv, pi)  # trained below
# Per-class training data: force samples (rows 0:201) and the matching motion
# samples (rows 402:603) from the three training categories.
total_seq_rf_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:201,0:15], Fmat_hslv[0:201,0:15], Fmat_lshv[0:201,0:15])))
total_seq_rm_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:201,15:15], Fmat_hslv[0:201,15:29], Fmat_lshv[0:201,15:15])))
total_seq_sf_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:201,15:25], Fmat_hslv[0:201,29:44], Fmat_lshv[0:201,15:20])))
total_seq_sm_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:201,25:25], Fmat_hslv[0:201,44:49], Fmat_lshv[0:201,20:25])))
total_seq_rf_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[402:603,0:15], Fmat_hslv[402:603,0:15], Fmat_lshv[402:603,0:15])))
total_seq_rm_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[402:603,15:15], Fmat_hslv[402:603,15:29], Fmat_lshv[402:603,15:15])))
total_seq_sf_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[402:603,15:25], Fmat_hslv[402:603,29:44], Fmat_lshv[402:603,15:20])))
total_seq_sm_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[402:603,25:25], Fmat_hslv[402:603,44:49], Fmat_lshv[402:603,20:25])))
# Interleave: even rows carry force sample j, odd rows the motion sample j.
# Column counts are class trial totals (rf 15+15+15=45, rm 0+14+0=14, ...).
total_seq_rf_lslv = np.zeros((402, 45))
total_seq_rm_lslv = np.zeros((402, 14))
total_seq_sf_lslv = np.zeros((402, 30))
total_seq_sm_lslv = np.zeros((402, 10))
for j in range(201):
    total_seq_rf_lslv[2 * j] = total_seq_rf_force_lslv[j]
    total_seq_rf_lslv[2 * j + 1] = total_seq_rf_motion_lslv[j]
    total_seq_rm_lslv[2 * j] = total_seq_rm_force_lslv[j]
    total_seq_rm_lslv[2 * j + 1] = total_seq_rm_motion_lslv[j]
    total_seq_sf_lslv[2 * j] = total_seq_sf_force_lslv[j]
    total_seq_sf_lslv[2 * j + 1] = total_seq_sf_motion_lslv[j]
    total_seq_sm_lslv[2 * j] = total_seq_sm_force_lslv[j]
    total_seq_sm_lslv[2 * j + 1] = total_seq_sm_motion_lslv[j]
# One training sequence per column (trial).
train_seq_rf_lslv = (np.array(total_seq_rf_lslv).T).tolist()
train_seq_rm_lslv = (np.array(total_seq_rm_lslv).T).tolist()
train_seq_sf_lslv = (np.array(total_seq_sf_lslv).T).tolist()
train_seq_sm_lslv = (np.array(total_seq_sm_lslv).T).tolist()
final_ts_rf_lslv = ghmm.SequenceSet(F, train_seq_rf_lslv)
final_ts_rm_lslv = ghmm.SequenceSet(F, train_seq_rm_lslv)
final_ts_sf_lslv = ghmm.SequenceSet(F, train_seq_sf_lslv)
final_ts_sm_lslv = ghmm.SequenceSet(F, train_seq_sm_lslv)
# Baum-Welch re-estimation of each class model on its own trials.
model_rf_lslv.baumWelch(final_ts_rf_lslv)
model_rm_lslv.baumWelch(final_ts_rm_lslv)
model_sf_lslv.baumWelch(final_ts_sf_lslv)
model_sm_lslv.baumWelch(final_ts_sm_lslv)
# For testing: interleave LSLV force (rows 0:201) and motion (rows 402:603)
# samples into a 402-row matrix, one column per held-out trial (36 trials).
total_seq_obj_lslv = np.zeros((402, 36))
total_seq_obj_force_lslv = Fmat_lslv[0:201, :]
total_seq_obj_motion_lslv = Fmat_lslv[402:603, :]
for j in range(201):
    total_seq_obj_lslv[2 * j] = total_seq_obj_force_lslv[j]
    total_seq_obj_lslv[2 * j + 1] = total_seq_obj_motion_lslv[j]
# Classify each held-out LSLV trial by the model with the highest Viterbi
# log-likelihood; record the winner in one-hot row vectors.
n_trials_lslv = np.size(total_seq_obj_lslv, 1)
rf_lslv = np.matrix(np.zeros(n_trials_lslv))
rm_lslv = np.matrix(np.zeros(n_trials_lslv))
sf_lslv = np.matrix(np.zeros(n_trials_lslv))
sm_lslv = np.matrix(np.zeros(n_trials_lslv))
for k in range(n_trials_lslv):
    # Column k is one interleaved force/motion sequence for a single trial.
    trial_lslv = np.array((np.array(total_seq_obj_lslv[:, k]).T).tolist())
    final_ts_obj_lslv = ghmm.EmissionSequence(F, trial_lslv.tolist())
    path_rf_obj_lslv = model_rf_lslv.viterbi(final_ts_obj_lslv)
    path_rm_obj_lslv = model_rm_lslv.viterbi(final_ts_obj_lslv)
    path_sf_obj_lslv = model_sf_lslv.viterbi(final_ts_obj_lslv)
    path_sm_obj_lslv = model_sm_lslv.viterbi(final_ts_obj_lslv)
    best_lslv = max(path_rf_obj_lslv[1], path_rm_obj_lslv[1],
                    path_sf_obj_lslv[1], path_sm_obj_lslv[1])
    # Ties resolve in rf, rm, sf, sm order, matching the original cascade.
    if best_lslv == path_rf_obj_lslv[1]:
        rf_lslv[0, k] = 1
    elif best_lslv == path_rm_obj_lslv[1]:
        rm_lslv[0, k] = 1
    elif best_lslv == path_sf_obj_lslv[1]:
        sf_lslv[0, k] = 1
    else:
        sm_lslv[0, k] = 1
# Fold the LSLV predictions into the running 4x4 confusion matrix.
# True-class column ranges: rf 0:15, rm 15:24, sf 24:31, sm 31:36.
_lslv_preds = (rf_lslv, rm_lslv, sf_lslv, sm_lslv)
_lslv_true_ranges = ((0, 15), (15, 24), (24, 31), (31, 36))
for _row, _pred in enumerate(_lslv_preds):
    for _col, (_lo, _hi) in enumerate(_lslv_true_ranges):
        cmat[_row][_col] = cmat[_row][_col] + np.sum(_pred[0, _lo:_hi])
############################################################################################################################################
# Plot the accumulated 4x4 confusion matrix, annotated with per-cell counts.
Nlabels = 4
fig = pp.figure()
ax = fig.add_subplot(111)
figplot = ax.matshow(cmat, interpolation='nearest', origin='upper',
                     extent=[0, Nlabels, 0, Nlabels])
ax.set_title('Performance of HMM Models')
pp.xlabel("Targets")
pp.ylabel("Predictions")
_class_names = ['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable']
ax.set_xticks([0.5, 1.5, 2.5, 3.5])
ax.set_xticklabels(_class_names)
# y ticks run top-to-bottom, hence the reversed positions
ax.set_yticks([3.5, 2.5, 1.5, 0.5])
ax.set_yticklabels(_class_names)
figbar = fig.colorbar(figplot)
# Write each raw count into its cell (y axis is flipped, hence 3.5 - i).
for i in range(4):
    for j in range(4):
        pp.text(j + 0.5, 3.5 - i, cmat[i][j])
pp.savefig('results_force_motion_10_states.png')
pp.show()
| tapomayukh/projects_in_python | classification/Classification_with_HMM/Single_Contact_Classification/Variable_Stiffness_Variable_Velocity/HMM/with 2s/hmm_crossvalidation_force_motion_10_states_scaled_wrt_all_data.py | Python | mit | 39,761 | [
"Mayavi"
] | 96eb7ac2533b76fbe16c02d84d82909d641dd21ce55596c02c428cae00d3469f |
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2013 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
# This example illustrates how to set up a kinetic solver and kinetic model
# using the scripting interface. Normally this would be done using the
# Shell::doLoadModel command, and normally would be coordinated by the
# SimManager as the base of the entire model.
# This example creates a bistable model having two enzymes and a reaction.
# One of the enzymes is autocatalytic.
# The model is set up to run using deterministic integration.
# If you pass in the argument 'gssa' it will run with the stochastic
# solver instead.
import math
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.widgets import Slider, Button
import numpy
import moose
import sys
line1 = ""
line2 = ""
def makeModel():
    """Build the bistable two-enzyme kinetic model under /model.

    Creates pools a, b, c in a 1e-21 m^3 cubic compartment, an enzyme on b
    that is autocatalytic (b is both its product and its enzyme), a second
    enzyme on c converting b back to a, a slow a<->b reaction, and two
    Table2 recorders for the concentrations of a and b.
    """
    model = moose.Neutral( 'model' )
    compartment = moose.CubeMesh( '/model/compartment' )
    compartment.volume = 1e-21  # m^3
    # the mesh is created automatically by the compartment
    mesh = moose.element( '/model/compartment/mesh' )
    # molecules and reactions
    a = moose.Pool( '/model/compartment/a' )
    b = moose.Pool( '/model/compartment/b' )
    c = moose.Pool( '/model/compartment/c' )
    enz1 = moose.Enz( '/model/compartment/b/enz1' )
    enz2 = moose.Enz( '/model/compartment/c/enz2' )
    cplx1 = moose.Pool( '/model/compartment/b/enz1/cplx' )
    cplx2 = moose.Pool( '/model/compartment/c/enz2/cplx' )
    reac = moose.Reac( '/model/compartment/reac' )
    # wire up the reaction scheme (order preserved from the original script)
    connections = (
        (enz1, 'sub',  a),
        (enz1, 'prd',  b),
        (enz1, 'enz',  b),
        (enz1, 'cplx', cplx1),
        (enz2, 'sub',  b),
        (enz2, 'prd',  a),
        (enz2, 'enz',  c),
        (enz2, 'cplx', cplx2),
        (reac, 'sub',  a),
        (reac, 'prd',  b),
    )
    for src, msg, dst in connections:
        moose.connect( src, msg, dst, 'reac' )
    # rate parameters chosen to make the system bistable
    a.concInit = 1
    b.concInit = 0
    c.concInit = 0.01
    enz1.kcat = 0.4
    enz1.Km = 4
    enz2.kcat = 0.6
    enz2.Km = 0.01
    reac.Kf = 0.001
    reac.Kb = 0.01
    # output tables recording the concentrations of a and b
    graphs = moose.Neutral( '/model/graphs' )
    outputA = moose.Table2( '/model/graphs/concA' )
    outputB = moose.Table2( '/model/graphs/concB' )
    moose.connect( outputA, 'requestOut', a, 'getConc' )
    moose.connect( outputB, 'requestOut', b, 'getConc' )
def main():
    """Set up the kinetic solver ('gsl' by default, 'gssa' if passed as the
    single command-line argument), run the simulation, show the interactive
    display, and exit on keypress."""
    solver = "gsl"
    makeModel()
    if len(sys.argv) == 2:
        solver = sys.argv[1]
    stoich = moose.Stoich( '/model/compartment/stoich' )
    stoich.compartment = moose.element( '/model/compartment' )
    if solver == 'gssa':
        # stochastic (Gillespie) integration
        stoich.ksolve = moose.Gsolve( '/model/compartment/ksolve' )
    else:
        # deterministic integration
        stoich.ksolve = moose.Ksolve( '/model/compartment/ksolve' )
    stoich.path = "/model/compartment/##"
    moose.setClock( 5, 1.0 )  # clock for the solver
    moose.useClock( 5, '/model/compartment/ksolve', 'process' )
    runSim()
    makeDisplay()
    print( "Hit 'enter' to exit" )
    sys.stdin.read(1)
    quit()
def updateAinit(val):
    """Slider callback: set the initial concentration of pool a and replot."""
    pool = moose.element( '/model/compartment/a' )
    pool.concInit = val
    updateDisplay()
def updateBinit(val):
    """Slider callback: set the initial concentration of pool b and replot."""
    pool = moose.element( '/model/compartment/b' )
    pool.concInit = val
    updateDisplay()
def updateCinit(val):
    """Slider callback: set the initial concentration of pool c and replot."""
    pool = moose.element( '/model/compartment/c' )
    pool.concInit = val
    updateDisplay()
def updateKcat1(val):
    """Slider callback: set kcat of the autocatalytic enzyme and replot."""
    enz = moose.element( '/model/compartment/b/enz1' )
    enz.kcat = val
    updateDisplay()
def updateKcat2(val):
    """Slider callback: set kcat of the b->a enzyme and replot."""
    enz = moose.element( '/model/compartment/c/enz2' )
    enz.kcat = val
    updateDisplay()
def updateDisplay():
    """Re-run the simulation and refresh both concentration traces."""
    runSim()
    tabA = moose.element( '/model/graphs/concA' )
    tabB = moose.element( '/model/graphs/concB' )
    line1.set_ydata( tabA.vector )
    line2.set_ydata( tabB.vector )
'''
def updateDisplay( line1, line2, b, c ):
line1.set_ydata( b.vector )
line2.set_ydata( c.vector )
fig.canvas.draw()
def rescaleAxis( event ):
print "doing Rescale"
lines = ax2.get_lines()
top = 0
for i in lines:
top = max( top, max( i.get_ydata() ) )
ax2.set_ylim( 0, top )
'''
def doQuit( event ):
    """Quit-button callback: terminate the program."""
    quit()
def makeDisplay():
    """Build the interactive matplotlib window: reaction schematic on top,
    concentration time course in the middle, and a panel of sliders and
    buttons wired to the update callbacks at the bottom.  Blocks in
    plt.show() until the window is closed."""
    global line1
    global line2
    a = moose.element( '/model/graphs/concA' )
    b = moose.element( '/model/graphs/concB' )
    img = mpimg.imread( 'simple_bistab.png' )
    fig = plt.figure( figsize=(8, 10) )
    # top panel: reaction schematic
    png = fig.add_subplot(311)
    imgplot = plt.imshow( img )
    plt.axis('off')
    # middle panel: time course of a and b
    ax2 = fig.add_subplot(312)
    plt.ylabel( 'Conc (mM)' )
    plt.xlabel( 'Time (s)' )
    ax2.autoscale( enable=True, axis='y' )
    plt.title( "States of system. Molecules a and b are swapped at t=100 and 200 to cause state flips." )
    t = numpy.arange( 0, a.vector.size, 1 )  # sec
    line1, = ax2.plot( t, a.vector, 'r-', label='a' )
    line2, = ax2.plot( t, b.vector, 'b-', label='b' )
    plt.legend()
    # bottom panel: control widgets
    ax = fig.add_subplot(313)
    plt.axis('off')
    axcolor = 'palegreen'
    axReset = plt.axes( [0.25, 0.05, 0.30, 0.03], facecolor='blue' )
    axQuit = plt.axes( [0.60, 0.05, 0.30, 0.03], facecolor='blue' )
    axAinit = plt.axes( [0.25, 0.1, 0.65, 0.03], facecolor=axcolor )
    axBinit = plt.axes( [0.25, 0.15, 0.65, 0.03], facecolor=axcolor )
    axCinit = plt.axes( [0.25, 0.20, 0.65, 0.03], facecolor=axcolor )
    axKcat2 = plt.axes( [0.25, 0.25, 0.65, 0.03], facecolor=axcolor )
    axKcat1 = plt.axes( [0.25, 0.30, 0.65, 0.03], facecolor=axcolor )
    reset = Button( axReset, 'Reset', color='cyan' )
    q = Button( axQuit, 'Quit', color='pink' )
    # slider ranges/defaults mirror the values assigned in makeModel()
    aInit = Slider( axAinit, 'A init conc', 0, 10, valinit=1.0 )
    bInit = Slider( axBinit, 'B init conc', 0, 10, valinit=0.0 )
    cInit = Slider( axCinit, 'C init conc', 0, 0.1, valinit=0.01 )
    kcat2 = Slider( axKcat2, 'Kcat for enz2', 0, 2, valinit=0.6 )
    kcat1 = Slider( axKcat1, 'Kcat for enz1', 0, 2, valinit=0.4 )

    def resetParms( event ):
        # Restore every slider to its initial value; each reset() fires the
        # slider's on_changed callback, re-running the simulation.
        aInit.reset()
        bInit.reset()
        cInit.reset()
        kcat2.reset()
        kcat1.reset()

    reset.on_clicked( resetParms )
    q.on_clicked( doQuit )
    aInit.on_changed( updateAinit )
    bInit.on_changed( updateBinit )
    cInit.on_changed( updateCinit )
    kcat1.on_changed( updateKcat1 )
    kcat2.on_changed( updateKcat2 )
    plt.show()
def runSim():
    """Run 300 s of simulation in three 100 s legs.  Between legs, most of
    the molecules are swapped between pools a and b to push the bistable
    system from one state to the other."""
    moose.reinit()
    moose.start( 100.0 )  # first leg
    a = moose.element( '/model/compartment/a' )
    b = moose.element( '/model/compartment/b' )
    # move 90% of a over to b
    b.conc = b.conc + a.conc * 0.9
    a.conc = a.conc * 0.1
    moose.start( 100.0 )  # second leg
    # move 99% of b back to a
    a.conc = a.conc + b.conc * 0.99
    b.conc = b.conc * 0.01
    moose.start( 100.0 )  # final leg
# Run 'main' only when this script is executed standalone.
if __name__ == '__main__':
    main()
| BhallaLab/moose | moose-examples/tutorials/ChemicalBistables/simpleBis.py | Python | gpl-3.0 | 8,484 | [
"MOOSE"
] | 02f31f9dc3390e712da0b59bc58c28e0fbb9751af458e7f946d0f61a57459ae6 |
import os
import itertools
import mdtraj
import pandas as pd
cos = np.cos
sin = np.sin
ave = lambda x: x.mean(0).mean(0)
phi0 = np.deg2rad(0.0)
amino_acids = ["A" , "C" , "D" , "E" , "F" , "G" , "H" , "I" , "K" , "L" , "M" , "N" , "Q" , "R" , "S" , "T" , "V" , "W" , "Y"]
labels = ["%s%s" % (a0,a1) for (a0, a1) in itertools.product(amino_acids, repeat=2)]
bad_pairs = ["AI","AY","CV","CY","DW","EF","EW","FA","FC","FI","FM","FN","FQ","FT","FY","IF","IM","IT","IV","IW","IY","LF","LI","LV","LW","MF","MI","ML","MM","MY","NY","QF","QY","SF","SI","TF","TI","TW","VF","VI","VV","VW","WA","WC","WF","WI","WL","WN","WV","YF","YI","YL","YM","YN","YV"]
bad_pairs.extend(["HH", "TH", "KH"])
bad_pairs.extend(["DI", "DK"])
bad_pairs.extend(["AD","CD","DD","FD","KD","LD","ND","QD","RD","TD","VD","WD","YD","EC","EH","SE","LE"])
bad_pairs.extend(["G%s" % aa for aa in amino_acids])
bad_pairs.extend(["%sG" % aa for aa in amino_acids])
labels = list(set(labels).difference(set(bad_pairs)))
small = pd.read_csv("/home/kyleb/src/tjlane/scalar-couplings/kyleb/smaller_couplings.csv", index_col=0)
large = pd.read_csv("/home/kyleb/src/tjlane/scalar-couplings/kyleb/larger_couplings.csv", index_col=0)
averaged = 0.5 * (small + large)
data = pd.DataFrame(index=labels, columns=["expt", "C1", "C2", "S1", "S2", "CS"], dtype='float')
for label in labels:
a0, a1 = label
if not os.path.exists("/home/kyleb/dat/peptides/dcd/%s-capped.dcd" % (label)):
continue
traj = mdtraj.load("/home/kyleb/dat/peptides/dcd/%s-capped.dcd" % (label), top="/home/kyleb/dat/peptides/raw/%s-capped.pdb" % (label))
rid, indices = mdtraj.geometry.atom_sequence_finder(traj, ["H","N", "CA", "HA"], residue_offsets=[0, 0, 0, 0])
phi = mdtraj.geometry.dihedral.compute_dihedrals(traj, indices)
phi = mdtraj.geometry.dihedral.compute_phi(traj)[1]
data["C1"][label] = ave(cos(phi + phi0))
data["C2"][label] = ave(cos(phi + phi0) ** 2.)
data["S1"][label] = ave(sin(phi + phi0))
data["S2"][label] = ave(sin(phi + phi0) ** 2.)
data["CS"][label] = ave(sin(phi + phi0) * cos(phi + phi0))
data["expt"][label] = averaged[a1][a0]
data = data.dropna(axis=0)
y, X = dmatrices('expt ~ C1 + C2 + S1', data=data, return_type='dataframe')
model = sm.OLS(y, X)
results = model.fit()
print results.summary()
data["yhat"] = results.predict()
data["delta"] = data.expt - data.yhat
rms = (data.delta ** 2.).mean() ** 0.5
rms
| hainm/open-forcefield-group | nmr/code/analyze_dcd.py | Python | gpl-2.0 | 2,427 | [
"MDTraj"
] | ef7c5b5ca861094e5d7dcc1a5104ccd11b03ca069bf2d5095935be0c55bbc082 |
# vim: fileencoding=utf-8 et ts=4 sts=4 sw=4 tw=0
"""
Base RPC service class
Authors:
* Brian Granger
* Alexander Glyzov
* Axel Voitier
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012-2014. Brian Granger, Min Ragan-Kelley, Alexander Glyzov,
# Axel Voitier
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import exceptions
from sys import exc_info
from abc import abstractmethod
from types import GeneratorType
from logging import getLogger
from traceback import format_exc
from functools import partial
from itertools import chain
from zmq.utils import jsonapi
from .base import RPCBase
#-----------------------------------------------------------------------------
# RPC Service base
#-----------------------------------------------------------------------------
class RPCServiceBase(RPCBase):
_GEN_PROTOCOL = set(('_SEND', '_THROW', '_CLOSE'))
_RESERVED = set((
'register', 'register_object', 'proc', 'task',
'start', 'stop', 'serve', 'shutdown', 'reset',
'connect', 'bind', 'bind_ports',
)) | _GEN_PROTOCOL
logger = getLogger('netcall.service')
def __init__(self, *args, **kwargs):
    """
    Parameters
    ==========
    serializer : [optional] <Serializer>
        An instance of a Serializer subclass that will be used to serialize
        and deserialize args, kwargs and the result.
    service_id : [optional] <bytes>
    """
    # Pull our own keyword out before delegating the rest to RPCBase.
    service_id = kwargs.pop('service_id', None)
    super(RPCServiceBase, self).__init__(*args, **kwargs)
    if service_id:
        self.service_id = service_id
    else:
        # default id: "<ClassName>/<socket identity>"
        self.service_id = b'%s/%s' % (self.__class__.__name__, self.identity)
    self.procedures = {}  # {<name> : <callable>}
    self.generators = {}  # {<req_id> : <Generator>}
    # expose this instance's public methods as RPC procedures, minus the
    # reserved infrastructure names
    self.register_object(self, restricted=self._RESERVED)
def _parse_request(self, msg_list):
    """
    Parse a multipart request into its components
    (never raises; problems are reported via the returned dict or None).

    Wire format:
    [<id>..<id>, b'|', req_id, proc_name, <ser_args>, <ser_kwargs>, <ignore>]

    Returns None for an unroutable message, otherwise a dict {
        'route'  : [<id:bytes>, ...], # dealer ids (the return path)
        'req_id' : <id:bytes>,        # unique message id
        'proc'   : <callable>,        # task callable (or protocol marker)
        'args'   : [<arg1>, ...],     # positional arguments
        'kwargs' : {<kw1>, ...},      # keyword arguments
        'ignore' : <bool>,            # ignore-result flag
        'error'  : None or <Exception>
    }
    """
    logger = self.logger
    if len(msg_list) < 6 or b'|' not in msg_list:
        logger.error('bad request %r', msg_list)
        return None

    error = None
    args = None
    kwargs = None
    ignore = None
    boundary = msg_list.index(b'|')
    name = msg_list[boundary + 2]
    if name in self._GEN_PROTOCOL:
        # generator-protocol control message (_SEND/_THROW/_CLOSE)
        proc = name
    else:
        proc = self.procedures.get(name, None)
    try:
        data = msg_list[boundary + 3:boundary + 5]
        args, kwargs = self._serializer.deserialize_args_kwargs(data)
        ignore = bool(int(msg_list[boundary + 5]))
    except Exception as e:
        # deserialization problems are reported via the 'error' slot
        error = e
    if proc is None:
        error = NotImplementedError("Unregistered procedure %r" % name)

    return dict(
        route  = msg_list[0:boundary],
        req_id = msg_list[boundary + 1],
        proc   = proc,
        args   = args,
        kwargs = kwargs,
        ignore = ignore,
        error  = error,
    )
def _build_reply(self, request, typ, data):
    """Assemble a multipart reply message.

    Parameters
    ----------
    typ : bytes
        Either b'ACK', b'OK', b'YIELD' or b'FAIL'.
    data : list of bytes
        Data frames appended after the status frame.
    """
    # layout: [<route>.., b'|', req_id, typ, <data>..]
    reply = list(request['route'])
    reply.extend([b'|', request['req_id'], typ])
    reply.extend(data)
    return reply
def _send_reply(self, reply):
    """ Send a multipart reply (as built by self._build_reply) over the
    ZMQ socket, logging it first.
    """
    self.logger.debug('sending %r', reply)
    self.socket.send_multipart(reply)
def _send_ack(self, request):
    "Notify the client that its request was received (carries service_id)"
    self._send_reply(self._build_reply(request, b'ACK', [self.service_id]))
def _send_ok(self, request, result):
    "Send a final OK reply carrying the serialized result"
    payload = self._serializer.serialize_result(result)
    self._send_reply(self._build_reply(request, b'OK', payload))
def _send_yield(self, request, result):
    "Send a YIELD reply carrying one serialized generator item"
    payload = self._serializer.serialize_result(result)
    self._send_reply(self._build_reply(request, b'YIELD', payload))
def _send_fail(self, request, with_tb=True):
    """Send a FAIL reply describing the exception currently being handled.

    The payload is a JSON dict with keys ename, evalue and traceback
    (traceback is None when with_tb is false).  Must be called from inside
    an except block, since it reads sys.exc_info() implicitly.
    """
    # take the current exception implicitly
    etype, evalue, tb = exc_info()
    # BUG FIX: traceback.format_exc() takes an optional frame *limit*, not a
    # traceback object -- the old call format_exc(tb) only worked by accident
    # of Python 2's arbitrary cross-type comparisons.  format_exc() formats
    # the current exception on its own.  The explicit conditional also avoids
    # the fragile "x and y or None" idiom.
    error_dict = {
        'ename'     : etype.__name__,
        'evalue'    : str(evalue),
        'traceback' : format_exc() if with_tb else None,
    }
    data_list = [jsonapi.dumps(error_dict)]
    reply = self._build_reply(request, b'FAIL', data_list)
    self._send_reply(reply)
def _handle_request(self, msg_list):
    """
    Handle an incoming request.
    The request is received as a multipart message:
    [<id>..<id>, b'|', req_id, proc_name, <serialized (args, kwargs)>]
    First, the service sends back a notification that the message was
    indeed received:
    [<id>..<id>, b'|', req_id, b'ACK', service_id]
    Next, the actual reply depends on if the call was successful or not:
    [<id>..<id>, b'|', req_id, b'OK', <serialized result>]
    [<id>..<id>, b'|', req_id, b'YIELD', <serialized result>]*
    [<id>..<id>, b'|', req_id, b'FAIL', <JSON dict of ename, evalue, traceback>]
    Here the (ename, evalue, traceback) are utf-8 encoded unicode.
    In case of a YIELD reply, the client can send a _SEND, _THROW or
    _CLOSE messages with the same req_id as in the first message sent.
    The first YIELD reply will contain no result to signal the client it is a
    yield-generator. The first message sent by the client to a yield-generator
    must be a _SEND with None as argument.
    [<id>..<id>, b'|', req_id, '_SEND', <serialized (sent value, None)>]
    [<id>..<id>, b'|', req_id, '_THROW', <serialized ([ename, evalue], None)>]
    [<id>..<id>, b'|', req_id, '_CLOSE', <serialized (None, None)>]
    The service will first send an ACK message. Then, it will send a YIELD
    reply whenever ready, or a FAIL reply in case an exception is raised.
    Termination of the yield-generator happens by throwing an exception.
    Normal termination raises a StopIterator. Termination by _CLOSE can
    raises a GeneratorExit or a StopIteration depending on the implementation
    of the yield-generator. Any other exception raised will also terminate
    the yield-generator.
    Note: subclasses can override this method if necessary.
    """
    req = self._parse_request(msg_list)
    if req is None:
        return
    self.logger.debug(
        'CALL: %s, args=%s, kwargs=%s, ignore=%s',
        req['proc'], req['args'], req['kwargs'], req['ignore']
    )
    # Acknowledge receipt before doing any actual work.
    self._send_ack(req)
    ignore = req['ignore']
    proc = req['proc']
    try:
        # raise any parsing errors here
        if req['error']:
            raise req['error']
        if proc in self._GEN_PROTOCOL:
            # Generator control command (_SEND/_THROW/_CLOSE); all replies
            # are produced inside the handler.
            self._handle_gen_protocol(req)
            return
        else:
            # call procedure
            res = proc(*req['args'], **req['kwargs'])
    except Exception, e:
        self.logger.error(e, exc_info=True)
        # Only report the failure when the caller asked for a reply.
        not ignore and self._send_fail(req)
    else:
        if ignore:
            return
        if isinstance(res, GeneratorType):
            # Remember the generator so later _SEND/_THROW/_CLOSE commands
            # with the same req_id can drive it; the empty YIELD tells the
            # client this call produced a generator.
            self.generators[req['req_id']] = res
            self._send_yield(req, None)
        else:
            self._send_ok(req, res)
def _handle_gen_protocol(self, req):
    """ Handles generator commands (_SEND, _THROW, _CLOSE).
    May raise all sorts of exceptions.
    """
    req_id = req['req_id']
    gen = self.generators.get(req_id, None)
    if gen is None:
        raise ValueError('no corresponding generator (req_id=%r)' % req_id)
    cmd = req['proc']
    args = req['args']
    try:
        if cmd == '_SEND':
            # Push the sent value into the generator, reply with its yield.
            res = gen.send(args)
            self._send_yield(req, res)
        elif cmd == '_THROW':
            # args = [ename, evalue]; unknown names fall back to Exception.
            ex_class = getattr(exceptions, args[0], Exception)
            res = gen.throw(ex_class, args[1])
            self._send_yield(req, res)
        else:
            # _CLOSE: shut the generator down, then unwind via GeneratorExit
            # so the cleanup branch below runs.
            res = gen.close()
            raise GeneratorExit
    except (GeneratorExit, StopIteration):
        # Generator finished: forget it and report termination as a FAIL
        # reply without a traceback.
        self.generators.pop(req_id, None)
        self._send_fail(req, with_tb=False)
#-------------------------------------------------------------------------
# Public API
#-------------------------------------------------------------------------
def register(self, func=None, name=None):
    """ A decorator to register a callable as a service task.

    Usable three ways: as a plain call ``service.register(fn, name='x')``,
    as a bare decorator ``@service.task`` or as a parameterized decorator
    ``@service.proc(name='work')``.  Returns the registered callable (or,
    in the parameterized-decorator case, a partial awaiting it).
    """
    if func is None:
        # Parameterized-decorator usage: defer registration until the
        # decorated function arrives.
        if name is None:
            raise ValueError("at least one argument is required")
        return partial(self.register, name=name)
    if not callable(func):
        raise ValueError("func argument should be callable")
    task_name = func.__name__ if name is None else name
    if task_name in self._RESERVED:
        raise ValueError("{} is a reserved function name".format(task_name))
    self.procedures[task_name] = func
    return func
task = register  # alias
proc = register  # alias
def register_object(self, obj, restricted=None, namespace=''):
    """
    Register public functions of a given object as service tasks.

    Parameters
    ----------
    obj : object or module
        Every public callable attribute (name not starting with '_')
        becomes a task.
    restricted : sequence of str, optional
        Attribute names that must NOT be registered.
        (Default changed from a shared mutable ``[]`` to ``None``;
        behaviour is identical for callers.)
    namespace : str, optional
        Prefix for the registered names: '<namespace>.<name>'.

    Example:
        service.register_object(first)                      # client.value()
        service.register_object(second, namespace='second') # client.second.value()
        service.register_object(third, namespace='third', restricted=['value'])
        import random
        service.register_object(random, namespace='random') # client.random.randint(10, 30)
    """
    if restricted is None:
        restricted = []
    for name in dir(obj):
        # Skip private/dunder names, explicitly restricted names, and
        # names reserved by the service itself.
        if name.startswith('_') or (name in restricted) or (name in self._RESERVED):
            continue
        try:
            proc = getattr(obj, name)
        except Exception:
            # getattr may raise for exotic descriptors; skip those.
            # (Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are no longer swallowed.)
            continue
        if callable(proc):
            # lstrip('.') makes the empty namespace produce a bare name.
            self.procedures['.'.join([namespace, name]).lstrip('.')] = proc
# Lifecycle hooks for concrete transport implementations.  (Enforcement of
# @abstractmethod requires the enclosing class to use ABCMeta -- the class
# header is not visible here; confirm.)
@abstractmethod
def start(self):
    """ Start the service (non-blocking) """
    pass
@abstractmethod
def stop(self):
    """ Stop the service (non-blocking) """
    pass
@abstractmethod
def serve(self):
    """ Serve RPC requests (blocking) """
    pass
| srault95/netcall | netcall/base_service.py | Python | bsd-3-clause | 12,879 | [
"Brian"
] | 138321514797a22287c22d2d523b6a865f8a3f585abd37825a7f05607f9dbdab |
#
# Copyright (C) 2007, Mark Lee
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Revision: 446 $
# $Date: 2009-01-23 04:20:21 +0100 (Fri, 23 Jan 2009) $
# $Author: brian@tannerpages.com $
# $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/rlglue/agent/Agent.py $
from rlglue.types import Action
from rlglue.types import Observation
class Agent:
    """Abstract base class for an RL-Glue agent.

    Subclasses implement the callbacks below, which RL-Glue invokes over
    the course of an experiment (init -> start -> step* -> end -> cleanup).
    All original method signatures were missing ``self``, so any bound
    call such as ``agent.agent_init(spec)`` raised TypeError; ``self`` is
    added here, which is what overriding subclasses use anyway.
    """

    # (string) -> void
    def agent_init(self, taskSpecification):
        """Receive the task specification before the experiment starts."""
        pass

    # (Observation) -> Action
    def agent_start(self, observation):
        """Return the first action of an episode."""
        pass

    # (double, Observation) -> Action
    def agent_step(self, reward, observation):
        """Return the next action, given the last reward and observation."""
        pass

    # (double) -> void
    def agent_end(self, reward):
        """Notify the agent that the episode ended with `reward`."""
        pass

    # () -> void
    def agent_cleanup(self):
        """Release any resources at the end of the experiment."""
        pass

    # (string) -> string
    def agent_message(self, message):
        """Handle a free-form, benchmark-specific message."""
        pass
| steckdenis/rlglue-py3 | rlglue/agent/Agent.py | Python | apache-2.0 | 1,491 | [
"Brian"
] | 5af13b0986d6b6bda1c94a3ab156d8d6f80a365627bdd3ea77107b4fbf7456d4 |
# ################################################################
#
# Active Particles on Curved Spaces (APCS)
#
# Author: Silke Henkes
#
# ICSMB, Department of Physics
# University of Aberdeen
#
# Author: Rastko Sknepnek
#
# Division of Physics
# School of Engineering, Physics and Mathematics
# University of Dundee
#
# (c) 2013, 2014
#
# This program cannot be used, copied, or modified without
# explicit permission of the author.
#
# ################################################################
# Utility code for computing potential energy profile averaged in the
# azimuthal direction
from read_data import *
from op import *
#from inertia import *
from glob import glob
from datetime import *
from random import uniform
from math import *
import numpy as np
import argparse
import scipy.spatial.distance as sd
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
#from matplotlib import rc
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
import vtk
# setting global parameters
#matplotlib.rcParams['text.usetex'] = 'true'
matplotlib.rcParams['lines.linewidth'] = 2
matplotlib.rcParams['axes.linewidth'] = 2
matplotlib.rcParams['xtick.major.size'] = 8
matplotlib.rcParams['ytick.major.size'] = 8
#matplotlib.rcParams['font.size']=40.0
#matplotlib.rcParams['legend.fontsize']=22.0
matplotlib.rcParams['font.size']=28
matplotlib.rcParams['legend.fontsize']=14
# Anchor table (red/green/blue breakpoints) for building a custom
# LinearSegmentedColormap; each tuple is (position, value_below, value_above).
cdict = {'red':   [(0.0, 0.25, 0.25),
                   (0.3, 1.0, 1.0),
                   (0.5, 0.4, 0.0),
                   (1.0, 0.0, 0.0)],
         'green': [(0.0, 0.0, 0.0),
                   (0.25, 0.0, 0.5),
                   (0.5, 1.0, 1.0),
                   (0.75, 0.5, 0.0),
                   (1.0, 0.0, 0.0)],
         'blue':  [(0.0, 0.0, 0.0),
                   (0.5, 0.0, 0.0),
                   (0.7, 1.0, 1.0),
                   (1.0, 0.25, 0.25)]}
def rotate_matrix_vectorial(axis, theta):
    """Build one 3x3 rotation matrix per row of `axis` (Euler-Rodrigues).

    Parameters
    ----------
    axis : (N, 3) array
        Rotation axes; normalized internally.  Unlike the original
        implementation, the caller's array is NOT modified in place.
    theta : float or (N,) array
        Rotation angle(s); broadcast against the axes.

    Returns
    -------
    (N, 3, 3) array of rotation matrices.
    """
    axis = np.asarray(axis, dtype=float)
    norm = np.sqrt(axis[:, 0]**2 + axis[:, 1]**2 + axis[:, 2]**2)
    ux = axis[:, 0] / norm
    uy = axis[:, 1] / norm
    uz = axis[:, 2] / norm
    # Quaternion components of the Euler-Rodrigues formula
    a = np.cos(theta / 2)
    b = -ux * np.sin(theta / 2)
    c = -uy * np.sin(theta / 2)
    d = -uz * np.sin(theta / 2)
    rotmat = np.empty((len(ux), 3, 3))
    rotmat[:, 0, 0] = a*a + b*b - c*c - d*d
    rotmat[:, 0, 1] = 2*(b*c - a*d)
    rotmat[:, 0, 2] = 2*(b*d + a*c)
    rotmat[:, 1, 0] = 2*(b*c + a*d)
    rotmat[:, 1, 1] = a*a + c*c - b*b - d*d
    rotmat[:, 1, 2] = 2*(c*d - a*b)
    rotmat[:, 2, 0] = 2*(b*d - a*c)
    rotmat[:, 2, 1] = 2*(c*d + a*b)
    rotmat[:, 2, 2] = a*a + d*d - b*b - c*c
    return rotmat

def rotate_vectorial(v, n, phi):
    """Rotate each row of `v` about the matching row of `n` by angle `phi`.

    Returns a new (N, 3) array; the inputs are left untouched.  (Dead
    no-op `np.shape(...)` expression statements from the original were
    removed.)
    """
    rotmat = rotate_matrix_vectorial(n, phi)
    vrot = np.empty(np.shape(v))
    vrot[:, 0] = rotmat[:, 0, 0]*v[:, 0] + rotmat[:, 0, 1]*v[:, 1] + rotmat[:, 0, 2]*v[:, 2]
    vrot[:, 1] = rotmat[:, 1, 0]*v[:, 0] + rotmat[:, 1, 1]*v[:, 1] + rotmat[:, 1, 2]*v[:, 2]
    vrot[:, 2] = rotmat[:, 2, 0]*v[:, 0] + rotmat[:, 2, 1]*v[:, 1] + rotmat[:, 2, 2]*v[:, 2]
    return vrot
# Fully vectorial version of parallel transport
# 1.determine the cross product of the origins
# 2.compute the magnitude of all the origin and cross vectors
# 3.Compute the dot product of the origins
# 4.The rotation axis is the direction of the cross product
# 5.The rotation angle is the angle between the origin vectors, extracted from the dot product
def parallel_transport(r1,r2,a1,a2):
    """Parallel-transport the tangent vectors a2 (attached at positions r2)
    to the positions r1, row by row.  a1 is accepted for call-signature
    symmetry but unused.  Returns the transported copy of a2.
    """
    r2_x_r1=np.cross(r2,r1)
    #len_r2_x_r1=np.sqrt(r2_x_r1[:,0]**2+r2_x_r1[:,1]**2+r2_x_r1[:,2]**2)
    len_r2_x_r1=np.sqrt(np.sum(r2_x_r1**2,axis=1))
    #lenr1=np.sqrt(r1[:,0]**2+r1[:,1]**2+r1[:,2]**2)
    lenr1=np.sqrt(np.sum(r1**2,axis=1))
    #lenr2=np.sqrt(r2[:,0]**2+r2[:,1]**2+r2[:,2]**2)
    lenr2=np.sqrt(np.sum(r2**2,axis=1))
    dot_r1r2=r1[:,0]*r2[:,0]+r1[:,1]*r2[:,1]+r1[:,2]*r2[:,2]
    n=np.empty(np.shape(r1))
    # Rotation axis: normalized cross product.
    # NOTE(review): dividing an (N,3) array by an (N,) array does not
    # broadcast in numpy; the commented per-component version below
    # suggests this line may fail for N != 3 -- verify with callers.
    n = r2_x_r1/len_r2_x_r1
    #n[:,0] = r2_x_r1[:,0]/len_r2_x_r1
    #n[:,1] = r2_x_r1[:,1]/len_r2_x_r1
    #n[:,2] = r2_x_r1[:,2]/len_r2_x_r1
    # Rotation angle between the two origin vectors
    phi = np.arccos(dot_r1r2/(lenr1*lenr2))
    a2trans=rotate_vectorial(a2,n,-phi)
    return a2trans
# same thing for one vector and a set (i.e. a particle and its neigbours)
def parallel_transport_single(r1,r2,a2):
    """Parallel transport of the tangent vectors a2 (at positions r2)
    towards a single reference position r1.

    NOTE(review): the axis=1 sums below require 2-D inputs, and
    np.dot(r1, r2) only yields per-row dot products for particular
    shapes -- confirm the intended shapes of r1/r2 with the call sites.
    """
    r2_x_r1=np.cross(r2,r1)
    len_r2_x_r1=np.sqrt(np.sum(r2_x_r1**2,axis=1))
    lenr1=np.sqrt(np.sum(r1**2,axis=1))
    lenr2=np.sqrt(np.sum(r2**2,axis=1))
    dot_r1r2=np.dot(r1,r2)
    n=np.empty(np.shape(r1))
    # NOTE(review): same (N,3)/(N,) broadcasting concern as in
    # parallel_transport above.
    n = r2_x_r1/len_r2_x_r1
    phi = np.arccos(dot_r1r2/(lenr1*lenr2))
    a2trans=rotate_vectorial(a2,n,-phi)
    return a2trans
def compute_energy_and_pressure_rastko(r, k):
    """Per-particle energy and pressure for harmonic soft repulsion.

    Pairs interact with E = 0.5*k*(2 - dr)**2 whenever their separation
    dr < 2; each pair's contribution is credited to both partners.

    Parameters
    ----------
    r : (N, 3) array of particle positions.
    k : float, spring (stiffness) constant.

    Returns
    -------
    [eng, press] : two length-N arrays.

    (The unused local ``a = np.ones(len(r))`` from the original was
    removed; everything else is unchanged.)
    """
    n = len(r)
    eng = np.zeros(n)
    press = np.zeros(n)
    dist = sd.cdist(r, r)  # full pairwise distance matrix
    for i in range(n):
        for j in range(i + 1, n):
            dr = dist[i, j]
            if dr < 2:
                overlap = 2.0 - dr
                fact = 0.5 * k * overlap
                eng_val = fact * overlap
                press_val = fact * dr
                eng[i] += eng_val
                eng[j] += eng_val
                press[i] += press_val
                press[j] += press_val
    return [eng, press]
def compute_energy_and_pressure(r,k,sigma):
    """Per-particle energy, pressure and stress (force-moment) tensors for
    harmonic soft repulsion with a neighbour cutoff of 2*sigma.

    Parameters: r -- (N,3) positions, k -- stiffness, sigma -- particle
    radius (used only for the cutoff).  Returns [eng, press, stress] of
    shapes (N,), (N,), (N,3,3).
    """
    eng = np.zeros(len(r))
    press = np.zeros(len(r))
    stress = np.zeros((len(r),3,3))
    #dist = sd.cdist(r,r)
    dmax=4*sigma**2
    for i in range(len(r)):
        #for i in range(10):
        # Squared distances from particle i to everybody; neighbours are
        # those within the cutoff (excluding i itself).
        dist=np.sum((r-r[i,:])**2,axis=1)
        neighbours=[index for index,value in enumerate(dist) if value <dmax]
        neighbours.remove(i)
        dr=np.sqrt(dist[neighbours])
        # NOTE(review): the overlap uses a fixed contact distance of 2.0
        # while the cutoff uses sigma -- confirm sigma == 1 is intended.
        diff=2.0-dr
        fact = 0.5*k*diff
        eng_val = fact*diff
        press_val = fact*dr
        # Stress (force moment) has to be element by element) r_a F_b = -k r_a dist_b
        drvec=r[neighbours,:]-r[i,:]
        Fvec=k*((diff/dr).transpose()*(drvec).transpose()).transpose()
        for u in range(3):
            for v in range(3):
                stress[neighbours,u,v]+=0.5*drvec[:,u]*Fvec[:,v]
        # Each (i, j) pair is visited from both ends of the outer loop, so
        # accumulating onto the neighbours alone still yields symmetric
        # totals over the full sweep.
        eng[neighbours]+=eng_val
        press[neighbours]+=press_val
    return [eng, press, stress]
def getProfiles(f,nbin,radius,stiffness,sigma,debug=False):
    """Azimuthally averaged profiles of a particle configuration on a sphere.

    Reads the configuration from file `f`, rotates the system so that the
    mean angular-momentum direction points along the lab z axis, and bins
    per-particle quantities (density, speed, energy, pressure, projected
    stress components and tilt angles) into `nbin` bins of polar angle.

    Returns
    -------
    [theta_out, rho, vel, eng, press, s_tt, s_tp, s_pt, s_pp,
     alpha, alpha_v, direction, orderpar]
    with theta_out the bin centres shifted to [-pi/2, pi/2].
    """
    print "Processing file : ", f
    data = ReadData(f)
    #inertia = Inertia(data)
    #I = inertia.compute() # Compute moment of inertia
    #direction = I[1][:,0] # Presumably smallest component is along z-axis.
    # rotate the system such that the principal direction of the moment of inertia
    # corresponding to the largest eiqenvalue align with the lab z axis
    x, y, z = np.array(data.data[data.keys['x']]), np.array(data.data[data.keys['y']]), np.array(data.data[data.keys['z']])
    vx, vy, vz = np.array(data.data[data.keys['vx']]), np.array(data.data[data.keys['vy']]), np.array(data.data[data.keys['vz']])
    nx, ny, nz = np.array(data.data[data.keys['nx']]), np.array(data.data[data.keys['ny']]), np.array(data.data[data.keys['nz']])
    rval = np.column_stack((x,y,z))
    vval = np.column_stack((vx,vy,vz))
    nval = np.column_stack((nx,ny,nz))
    ez = np.array([0,0,1]) # lab frame z-axis
    # Simply get the axis as the mean crossproduct or r and v; assuming alignment. This should also not flip.
    direction=np.sum(np.cross(rval,vval),axis=0)
    orderpar=direction/len(x)
    print orderpar
    direction = direction/np.linalg.norm(direction)
    # Axis/angle that rotate `direction` onto the lab z axis
    axis = np.cross(direction,ez)
    axis = axis/np.linalg.norm(axis)
    rot_angle = acos(np.dot(direction,ez))
    axis0 = np.empty(np.shape(rval))
    axis0[:,0] = axis[0]
    axis0[:,1] = axis[1]
    axis0[:,2] = axis[2]
    rval = rotate_vectorial(rval,axis0,-rot_angle)
    vval = rotate_vectorial(vval,axis0,-rot_angle)
    nval = rotate_vectorial(nval,axis0,-rot_angle)
    # Row-wise normalizations: directors, radial unit vectors, velocities
    nval=((nval).transpose()/(np.sqrt(np.sum(nval**2,axis=1))).transpose()).transpose()
    rhat=((rval).transpose()/(np.sqrt(np.sum(rval**2,axis=1))).transpose()).transpose()
    vel = np.sqrt(vval[:,0]**2 + vval[:,1]**2 + vval[:,2]**2)
    velnorm=((vval).transpose()/(vel).transpose()).transpose()
    # Determine the Euler angles, essentially. Find theta and phi for each particle, and use it to compute alpha and stress components
    # Angle theta with the z axis. arccos is between 0 and pi, so that's ok already
    theta=np.arccos(rhat[:,2])
    # From the euler angles: rx = sin theta cos phi
    # Choosing correct quadrant through the sign of ry=sin theta sin phi
    phi=np.sign(rhat[:,1]/(np.sin(theta)))*np.arccos(rhat[:,0]/(np.sin(theta)))
    # The other two of our trio of local coordinate vectors
    etheta = np.empty(np.shape(rval))
    etheta[:,0]=np.cos(theta)*np.cos(phi)
    etheta[:,1]=np.cos(theta)*np.sin(phi)
    etheta[:,2]=-np.sin(theta)
    ephi=np.empty(np.shape(rval))
    ephi[:,0]=-np.sin(phi)
    ephi[:,1]=np.cos(phi)
    ephi[:,2]=0
    # Alpha, the angle between the local polarity and the equator; here represented by ephi
    alpha=-np.arcsin(np.sum(nval*etheta, axis=1))
    # Same thing for the velocity
    # No - add pi/2 to get something that does not add up to zero
    alpha_v=np.arccos(np.sum(velnorm*etheta, axis=1))
    eng, press,stress = compute_energy_and_pressure(rval,stiffness,sigma)
    # Project the stresses into the e,theta,phi components. The rr component hast to be 0, and the r cross components
    # belong to the projection. So they are not all that interesting.
    # We want the theta theta, theta phi, phi theta ant phi phi components (implicitly testing symmetries ...)
    # I give up on the notation. Stress is (N,3,3), the axes are (N,3). We want e_i sigma_ij e_j
    s_tt=np.sum(etheta*np.einsum('kij,kj->ki',stress,etheta),axis=1)
    s_tp=np.sum(etheta*np.einsum('...ij,...j->...i',stress,ephi),axis=1)
    s_pt=np.sum(ephi*np.einsum('...ij,...j->...i',stress,etheta),axis=1)
    s_pp=np.sum(ephi*np.einsum('...ij,...j->...i',stress,ephi),axis=1)
    # Setting up the binning. I changed this to go from -pi/2 to pi/2 consistently. This maybe makes less pretty pictures,
    # but the edges are going to be a lot cleaner. Also only one bin to handle accross multiple v0/J.
    # Can always rebin to less resolution if necessary
    # Position angle with the z axis
    theta_bin=np.linspace(0,np.pi,nbin+1)
    dtheta=theta_bin[1]-theta_bin[0]
    theta_out=theta_bin[:nbin]+dtheta/2-np.pi/2
    rho_profile, bin_edges = np.histogram(theta, bins=theta_bin,density=True)
    isdata=[index for index,value in enumerate(rho_profile) if (value >0)]
    # Normalize the density by the circumference of each latitude band
    normz=2*np.pi*radius*abs(np.cos(theta_out))
    rho_profile[isdata]=rho_profile[isdata]/normz[isdata]
    rho_profile/=np.mean(rho_profile)
    vel_profile=np.zeros(np.shape(rho_profile))
    eng_profile=np.zeros(np.shape(rho_profile))
    press_profile=np.zeros(np.shape(rho_profile))
    s_tt_profile=np.zeros(np.shape(rho_profile))
    s_tp_profile=np.zeros(np.shape(rho_profile))
    s_pt_profile=np.zeros(np.shape(rho_profile))
    s_pp_profile=np.zeros(np.shape(rho_profile))
    alpha_profile=np.zeros(np.shape(rho_profile))
    alpha_v_profile=np.zeros(np.shape(rho_profile))
    for idx in range(nbin):
        inbin=[index for index,value in enumerate(theta) if (value >= theta_bin[idx] and value<=theta_bin[idx+1])]
        #print len(inbin)
        if len(inbin)>0:
            vel_profile[idx]=np.mean(vel[inbin])
            eng_profile[idx]=np.mean(eng[inbin])
            press_profile[idx]=np.mean(press[inbin])
            s_tt_profile[idx]=np.mean(s_tt[inbin])
            s_tp_profile[idx]=np.mean(s_tp[inbin])
            s_pt_profile[idx]=np.mean(s_pt[inbin])
            s_pp_profile[idx]=np.mean(s_pp[inbin])
            alpha_profile[idx]=np.mean(alpha[inbin])
            alpha_v_profile[idx]=np.mean(alpha_v[inbin])
    # Debugging output
    if debug==True:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(rval[:,0], rval[:,1], rval[:,2], zdir='z', c='b')
    return [theta_out,rho_profile,vel_profile,eng_profile,press_profile,s_tt_profile,s_tp_profile,s_pt_profile,s_pp_profile,alpha_profile,alpha_v_profile,direction,orderpar]
def findLoop(rval,etheta,ephi,dmax):
    """Partition the contact network into closed counterclockwise loops
    (Ball-Blumenfeld construction) on the sphere.

    Parameters: rval -- (N,3) positions; etheta, ephi -- local tangent
    frame per particle; dmax -- squared neighbour cutoff.

    Returns (LoopList, Ival, Jval): the list of loops (each a list of
    particle indices) and the flat directed contact arrays i->j.
    """
    neighList=[]
    Ival=[]
    Jval=[]
    Inei=[]
    count=0
    # Identify all neighbours and add them to a list. Keep i->j and j->i separate
    # The label is in neighList, the particle numbers are in Ival and Jval
    for i in range(len(rval)):
        dist=np.sum((rval-rval[i,:])**2,axis=1)
        neighbours=[index for index,value in enumerate(dist) if value <dmax]
        neighbours.remove(i)
        neighList.extend([u for u in range(count,count+len(neighbours))])
        Ival.extend([i for k in range(len(neighbours))])
        Jval.extend(neighbours)
        Inei.append([u for u in range(count,count+len(neighbours))])
        count+=len(neighbours)
    # Identify loops based on the neighbour list. Kick out any (one-way) contacts that have occured so far
    Jarray=np.array(Jval)
    LoopList=[]
    l=0
    while len(neighList)>0:
        # Start a fresh loop from the first unused directed contact.
        idx=neighList[0]
        idxkeep=idx
        #print idx
        idx0=[]
        #llist0=[]
        llist=[]
        goneround=False
        while goneround==False:
            # Sort neighbours counterclockwise according to their local angle
            dr0hat=rval[Jval[idx],:]-rval[Ival[idx],:]
            dr0hat/=np.sqrt(np.sum(dr0hat**2))
            jnei0=Inei[Jval[idx]]
            jnei=list(Jarray[jnei0])
            drvec=rval[jnei,:]-rval[Jval[idx],:]
            drhat=((drvec).transpose()/(np.sqrt(np.sum(drvec**2,axis=1))).transpose()).transpose()
            cbeta=np.einsum('kj,j->k',drhat,ephi[Jval[idx],:])
            sbeta=np.einsum('kj,j->k',drhat,etheta[Jval[idx],:])
            cbeta0=np.dot(dr0hat,ephi[Jval[idx],:])
            sbeta0=np.dot(dr0hat,etheta[Jval[idx],:])
            # arccos returns between 0 and pi. Just multiply by the sign of the sine
            beta=np.arccos(cbeta)*np.sign(sbeta)
            # Determine the angles from the contact (read backwards) to the others, and pick the largest, modulo 2pi
            beta0=np.arccos(cbeta0)*np.sign(sbeta0)-np.pi
            dbeta=beta-beta0
            dbeta-=2*np.pi*np.round((dbeta-np.pi)/(2*np.pi))
            # and throwing out the particle itself
            itself=jnei.index(Ival[idx])
            dbeta[itself]=-1
            cnt=np.argmax(dbeta)
            idx=jnei0[cnt]
            # The loop is closed once a contact repeats.
            goneround = idx in idx0
            if goneround==False:
                idx0.append(idx)
                llist.append(Jarray[idx])
        #print idx0
        #print llist
        #print len(neighList)
        # Retire all contacts consumed by this loop.
        for v in idx0:
            try:
                neighList.remove(v)
            except ValueError:
                pass
        # There may be rare isolated cases (rattlers?) where the first contact itself is not part of the eventual loop.
        # This causes problems, because the loop identified after that has been removed.
        # Remove the original contact, in case it hasn't
        try:
            #print idxkeep
            neighList.remove(idxkeep)
        except ValueError:
            pass
        LoopList.append(llist)
        l+=1
    return LoopList,Ival,Jval
def getDefects(f,radius,sigma,outname,symtype='polar',debug=False,writeVTK=False):
    """Locate topological defects of the orientation (and, depending on
    `symtype`, velocity) field of a particle configuration on a sphere.

    Reads the configuration from file `f`, builds the local spherical
    frame per particle, walks all contact loops (findLoop) and sums the
    winding of the director around each loop.

    Returns (defects_n, defects_v, numdefect_n, numdefect_v); the arrays
    store up to 500 defects as rows of [charge, x, y, z].
    """
    print "Processing file : ", f
    data = ReadData(f)
    if writeVTK:
        #outname = '.'.join((f).split('.')[:-1]) + '_data.vtk'
        print outname
        writeConfigurationVTK(data,outname)
    # get the data out of the files
    x, y, z = np.array(data.data[data.keys['x']]), np.array(data.data[data.keys['y']]), np.array(data.data[data.keys['z']])
    vx, vy, vz = np.array(data.data[data.keys['vx']]), np.array(data.data[data.keys['vy']]), np.array(data.data[data.keys['vz']])
    nx, ny, nz = np.array(data.data[data.keys['nx']]), np.array(data.data[data.keys['ny']]), np.array(data.data[data.keys['nz']])
    rval = np.column_stack((x,y,z))
    vval = np.column_stack((vx,vy,vz))
    nval = np.column_stack((nx,ny,nz))
    # To be very, very sure that it is exactly normalized
    nval=((nval).transpose()/(np.sqrt(np.sum(nval**2,axis=1))).transpose()).transpose()
    # Getting the local coordinate system
    rhat=((rval).transpose()/(np.sqrt(np.sum(rval**2,axis=1))).transpose()).transpose()
    vhat=((vval).transpose()/(np.sqrt(np.sum(vval**2,axis=1))).transpose()).transpose()
    theta=np.arccos(rhat[:,2])
    # From the euler angles: rx = sin theta cos phi
    # Choosing correct quadrant through the sign of ry=sin theta sin phi
    phi=np.sign(rhat[:,1]/(np.sin(theta)))*np.arccos(rhat[:,0]/(np.sin(theta)))
    # The other two of our trio of local coordinate vectors
    etheta = np.empty(np.shape(rval))
    etheta[:,0]=np.cos(theta)*np.cos(phi)
    etheta[:,1]=np.cos(theta)*np.sin(phi)
    etheta[:,2]=-np.sin(theta)
    ephi=np.empty(np.shape(rval))
    ephi[:,0]=-np.sin(phi)
    ephi[:,1]=np.cos(phi)
    ephi[:,2]=0
    # Trying a simple n^2 algorithm for the defects. Identify all loops by the old trusty Ball-Blumenfeld method
    # Parallel transport each neighbor orientation vector back to it? Then compute the Burgers vector.
    #dmax=(2.4*sigma)**2
    dmax=(2.0*sigma)**2
    LoopList,Ival,Jval=findLoop(rval,etheta,ephi,dmax)
    numdefect_n=0
    numdefect_v=0
    # Defect storage, up to 100
    # For n and velocity
    defects_n=np.zeros((500,4))
    defects_v=np.zeros((500,4))
    print len(LoopList)
    for u in range(len(LoopList)):
        # Should already be ordered counterclockwise
        # Following a version of the Goldenfeld algorithm, with nx,ny,nz as is playing the role of the order parameter. The sphere is in cartesian space
        thisLoop=LoopList[u]
        # Generalized algorithm for defects of any type
        # The old nematic algorithm, based on the hemispheres
        # Count the defect charge. Times two, to use integers and easier if statements
        printnow=False
        if symtype=='oldnematic':
            # The polarization vector nval
            ctheta=1
            coord=[]
            coord.append(nval[thisLoop[0],:])
            for t in range(1,len(thisLoop)):
                ctheta=np.dot(nval[thisLoop[t],:],np.sign(ctheta)*nval[thisLoop[t-1],:])
                # Nematic: append the order parameter, rotated through the *smaller* angle
                coord.append(np.sign(ctheta)*nval[thisLoop[t],:])
            # Find out if the last point and the starting point are in the same hemisphere.
            cdefect=np.dot(coord[t],coord[0])
            if cdefect<0:
                ndefect=0.5
            else:
                ndefect=0.0
            # The normalized velocity vector vhat
            ctheta=1
            coord=[]
            coord.append(vhat[thisLoop[0],:])
            for t in range(1,len(thisLoop)):
                ctheta=np.dot(vhat[thisLoop[t],:],np.sign(ctheta)*vhat[thisLoop[t-1],:])
                # Nematic: append the order parameter, rotated through the *smaller* angle
                coord.append(np.sign(ctheta)*vhat[thisLoop[t],:])
            # Find out if the last point and the starting point are in the same hemisphere.
            cdefect=np.dot(coord[t],coord[0])
            if cdefect<0:
                vdefect=0.5
            else:
                vdefect=0.0
        elif symtype=='polar':
            # nval: accumulate the signed rotation angle around the loop
            thetatot=0
            t0=thisLoop[-1]
            for t in thisLoop[0:len(thisLoop)]:
                ctheta=np.dot(nval[t,:],nval[t0,:])
                stheta=np.dot(rhat[t,:],np.cross(nval[t,:],nval[t0,:]))
                theta=np.arccos(ctheta)*np.sign(stheta)
                thetatot+=theta
                t0=t
            # Classify according to defects
            # For a polar one, we can only have integer defects
            ndefect=int(round(thetatot/(2*np.pi)))
            # vhat
            thetatot=0
            t0=thisLoop[-1]
            for t in thisLoop[0:len(thisLoop)]:
                ctheta=np.dot(vhat[t,:],vhat[t0,:])
                stheta=np.dot(rhat[t,:],np.cross(vhat[t,:],vhat[t0,:]))
                theta=np.arccos(ctheta)*np.sign(stheta)
                thetatot+=theta
                t0=t
                #if ctheta<0:
                #print "candidate: t t0 ctheta stheta theta thetatot"
                #print t, t0, ctheta, stheta, theta, thetatot
                #printnow=True
            # Classify according to defects
            # For a polar one, we can only have integer defects
            vdefect=int(round(thetatot/(2*np.pi)))
            #if printnow:
            #print thetatot
            #print thisLoop
        elif symtype=='nematic':
            # nval: winding counted modulo pi, so half-integer charges appear
            thetatot=0
            t0=thisLoop[-1]
            ctheta=1
            for t in thisLoop[0:len(thisLoop)]:
                ctheta=np.dot(nval[t,:],np.sign(ctheta)*nval[t0,:])
                stheta=np.dot(rhat[t,:],np.cross(nval[t,:],nval[t0,:]))
                theta=np.arccos(np.sign(ctheta)*ctheta)*np.sign(stheta)
                thetatot+=theta
                t0=t
            ndefect=0.5*int(round(thetatot/(np.pi)))
            ## vhat
            #thetatot=0
            #t0=thisLoop[0]
            ##ctheta=1
            #for t in thisLoop[1:-1]:
            #ctheta=abs(np.dot(vhat[t,:],vhat[t0,:]))
            #stheta=np.dot(rhat[t,:],np.cross(nval[t,:],vhat[t0,:]))
            #theta=np.arccos(ctheta)*np.sign(stheta)
            #thetatot+=theta
            #t0=t
            #vdefect=0.5*int(round(thetatot/(np.pi)))
        else:
            print "Unknown alignment symmetry type! Not tracking defects!"
            ndefect=0.0
            vdefect=0.0
        if abs(ndefect)>0:
            if numdefect_n<500:
                print "Found Defect in orientation field!"
                print ndefect
                # NOTE(review): thetatot is not defined in the 'oldnematic'
                # branch, so this print would raise a NameError there -- verify.
                print thetatot
                # Construct the geometric centre of the defect
                rmhat=np.sum(rval[thisLoop],axis=0)
                rmhat/=np.sqrt(np.sum(rmhat**2))
                # Charge of the defect
                defects_n[numdefect_n,0]=ndefect
                # Coordinates of the defect
                defects_n[numdefect_n,1:]=radius*rmhat
                numdefect_n+=1
        #if abs(vdefect)>0:
        #if numdefect_v<500:
        #print "Found Defect in velocity field!"
        #print vdefect
        ## Construct the geometric centre of the defect
        #rmhat=np.sum(rval[thisLoop],axis=0)
        #rmhat/=np.sqrt(np.sum(rmhat**2))
        ## Charge of the defect
        #defects_v[numdefect_v,0]=vdefect
        ## Coordinates of the defect
        #defects_v[numdefect_v,1:]=radius*rmhat
        #numdefect_v+=1
    #print defects
    print 'Number of orientation field defects: ' + str(numdefect_n)
    print 'Number of velocity field defects: ' + str(numdefect_v)
    # Debugging output
    if debug==True:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(rval[:,0], rval[:,1], rval[:,2], zdir='z', c='b',s=4)
        ax.scatter(defects_n[:,1], defects_n[:,2], defects_n[:,3], zdir='z', c='r',s=50)
        ax.scatter(defects_v[:,1], defects_v[:,2], defects_v[:,3], zdir='z', c='g',s=50)
    return defects_n, defects_v,numdefect_n,numdefect_v
def writeConfigurationVTK(data, outfile):
    """Dump a particle configuration (positions, radii and, when present,
    velocity and director fields) to a VTK XML PolyData file.

    Parameters
    ----------
    data : ReadData
        Parsed configuration; must provide at least x, y, z columns.
    outfile : str
        Path of the .vtp/.vtk file to write.

    Raises
    ------
    ValueError
        If any coordinate column is missing.  (The original raised a bare
        string, which is itself a TypeError at runtime.)
    """
    Points = vtk.vtkPoints()
    has_v = False
    has_n = False
    if not (data.keys.has_key('x') and data.keys.has_key('y') and data.keys.has_key('z')):
        # Raising a string literal is not legal Python; raise a real exception.
        raise ValueError("Particle coordinate not specified in the input data.")
    x = np.array(data.data[data.keys['x']])
    y = np.array(data.data[data.keys['y']])
    z = np.array(data.data[data.keys['z']])
    if (data.keys.has_key('vx') or data.keys.has_key('vy') or data.keys.has_key('vz')):
        vx = np.array(data.data[data.keys['vx']])
        vy = np.array(data.data[data.keys['vy']])
        vz = np.array(data.data[data.keys['vz']])
        has_v = True
    if (data.keys.has_key('nx') or data.keys.has_key('ny') or data.keys.has_key('nz')):
        nx = np.array(data.data[data.keys['nx']])
        ny = np.array(data.data[data.keys['ny']])
        nz = np.array(data.data[data.keys['nz']])
        has_n = True
    r = np.ones(len(x))  # all particles written with unit radius
    Radii = vtk.vtkDoubleArray()
    Radii.SetNumberOfComponents(1)
    Radii.SetName('Radius')
    if has_v:
        Velocities = vtk.vtkDoubleArray()
        Velocities.SetNumberOfComponents(3)
        Velocities.SetName("Velocity")
    if has_n:
        Directors = vtk.vtkDoubleArray()
        Directors.SetNumberOfComponents(3)
        Directors.SetName("Directors")
        # Built but intentionally not attached below; the matching AddArray
        # call was commented out upstream.
        NDirectors = vtk.vtkDoubleArray()
        NDirectors.SetNumberOfComponents(3)
        NDirectors.SetName("NDirectors")
    # Positions and radii.  Iterate only over columns that always exist;
    # the original zipped nx/ny/nz here, which raised a NameError whenever
    # the director field was absent.
    for (xx, yy, zz, rr) in zip(x, y, z, r):
        Points.InsertNextPoint(xx, yy, zz)
        Radii.InsertNextValue(rr)
    if has_v:
        for (vvx, vvy, vvz) in zip(vx, vy, vz):
            Velocities.InsertNextTuple3(vvx, vvy, vvz)
    if has_n:
        for (nnx, nny, nnz) in zip(nx, ny, nz):
            # Directors are stored half-length (drawn centred on the particle).
            Directors.InsertNextTuple3(0.5*nnx, 0.5*nny, 0.5*nnz)
            NDirectors.InsertNextTuple3(-0.5*nnx, -0.5*nny, -0.5*nnz)
    polydata = vtk.vtkPolyData()
    polydata.SetPoints(Points)
    polydata.GetPointData().AddArray(Radii)
    if has_v:
        polydata.GetPointData().AddArray(Velocities)
    if has_n:
        polydata.GetPointData().AddArray(Directors)
    polydata.Modified()
    writer = vtk.vtkXMLPolyDataWriter()
    writer.SetFileName(outfile)
    # The writer input API changed at VTK 6: SetInput -> SetInputData.
    if vtk.VTK_MAJOR_VERSION <= 5:
        writer.SetInput(polydata)
    else:
        writer.SetInputData(polydata)
    writer.SetDataModeToAscii()
    writer.Write()
def writeDefects(defects_n, defects_v,numdefect_n,numdefect_v,outfile):
    """Write the detected defects to a VTK XML PolyData (.vtp) file.

    One anchor point is placed at the origin; each defect becomes a point
    (orientation defects tagged Number=1, velocity defects Number=2) with
    a line connecting it to the anchor.
    """
    # Preparing the vtp output
    # Create point structure in vtk
    Points = vtk.vtkPoints()
    print "Created Points"
    # Create (something) associated to the points, with different values for each
    Number = vtk.vtkDoubleArray()
    Number.SetNumberOfComponents(1)
    Number.SetName('Number')
    Size = vtk.vtkDoubleArray()
    Size.SetNumberOfComponents(1)
    Size.SetName('Size')
    print "Created Number"
    # Put one point at the centre, and the ndefect ones around it
    Points.InsertNextPoint(0,0,0)
    Number.InsertNextValue(0)
    Size.InsertNextValue(0)
    for u in range(numdefect_n):
        Points.InsertNextPoint(defects_n[u,1],defects_n[u,2],defects_n[u,3])
        Number.InsertNextValue(1)
        Size.InsertNextValue(1.0)
    for u in range(numdefect_v):
        Points.InsertNextPoint(defects_v[u,1],defects_v[u,2],defects_v[u,3])
        Number.InsertNextValue(2)
        Size.InsertNextValue(1.0)
    print "Added Particles and Numbers"
    # Spokes from the central anchor (point 0) to every defect point.
    lines = vtk.vtkCellArray()
    line = vtk.vtkLine()
    for i in range(numdefect_n):
        line = vtk.vtkLine()
        line.GetPointIds().SetId(0,0)
        line.GetPointIds().SetId(1,i+1)
        lines.InsertNextCell(line)
    for i in range(numdefect_v):
        line = vtk.vtkLine()
        line.GetPointIds().SetId(0,0)
        line.GetPointIds().SetId(1,numdefect_n+i+1)
        lines.InsertNextCell(line)
    print "Added lines"
    polydata = vtk.vtkPolyData()
    polydata.SetPoints(Points)
    polydata.SetLines(lines)
    polydata.GetPointData().AddArray(Number)
    polydata.GetPointData().AddArray(Size)
    print "Finished Polydata"
    polydata.Modified()
    writer = vtk.vtkXMLPolyDataWriter()
    writer.SetFileName(outfile)
    # Python 2.7 vs. 3 incompatibility?
    if vtk.VTK_MAJOR_VERSION <= 5:
        writer.SetInput(polydata)
    else:
        writer.SetInputData(polydata)
    writer.SetDataModeToAscii()
    writer.Write()
    print "Wrote File"
# Scripting version: Only execute if this is called as a script. Otherwise, it attempts to go through here when loading as a module
# and throws errors because some arguments aren't defined
if __name__ == "__main__":
    # Command-line driver: locate defects in one configuration file and
    # dump both the configuration and the defects as .vtp files.
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", type=str, help="Input file with particle velocity field")
    parser.add_argument("-o", "--output", type=str, default="defects", help="Output file (text file)")
    parser.add_argument("-k", "--k", type=float, default=1.0, help="soft potential strength")
    parser.add_argument("-R", "--sphere_r", type=float, default=30.0, help="radius of sphere for spherical system")
    parser.add_argument("-r", "--particle_r", type=float, default=1.0, help="radius of particle ")
    args = parser.parse_args()
    print
    print "\tActive Particles on Curved Spaces (APCS)"
    print "\tPolar and nematic defect finding algoritm"
    print
    print "\tSilke Henkes"
    print "\tUniversity of Aberdeen"
    print "\t(c) 2014"
    print "\t----------------------------------------------"
    print
    print "\tInput : ", args.input
    print "\tOutput : ", args.output
    print "\tSpring constant : ", args.k
    print "\tRadius of the sphere : ", args.sphere_r
    print "\tRadius of the particle : ", args.particle_r
    print
    # Derived output names: <input stem>_data.vtp and <input stem>_defects.vtp
    outname = '.'.join((args.input).split('.')[:-1]) + '_data.vtp'
    print outname
    defects_n, defects_v,numdefect_n,numdefect_v=getDefects(args.input,args.sphere_r,args.particle_r,outname,'nematic',True,True)
    outname = '.'.join((args.input).split('.')[:-1]) + '_defects.vtp'
    print outname
    #writer.SetFileName(args.output+'/'+outname+'.vtp')
    #writer.SetFileName(args.output+'.vtp')
    writeDefects(defects_n, defects_v,numdefect_n,numdefect_v,outname)
plt.show() | sknepneklab/SAMoS | FormerAnalysis/nematic_defects_analysis.py | Python | gpl-3.0 | 28,489 | [
"VTK"
] | eb94031a15c050326650d200fbead44d78248f0786c7316af60531da6fc22bdd |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk


class vtkShrinkFilter(SimpleVTKClassModuleBase):
    """Auto-generated DeVIDE module wrapping VTK's vtkShrinkFilter.

    The generic mixin does all the wiring: one vtkDataSet input, one
    vtkUnstructuredGrid output, with the module documentation taken from
    the wrapped VTK object (replaceDoc=True).
    """

    def __init__(self, module_manager):
        # 'Processing.' is the category under which the module is listed
        # in the DeVIDE interface.
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkShrinkFilter(), 'Processing.',
            ('vtkDataSet',), ('vtkUnstructuredGrid',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
| chrisidefix/devide | modules/vtk_basic/vtkShrinkFilter.py | Python | bsd-3-clause | 492 | [
"VTK"
] | 3445702ed54f25467cdb5f9d93a9263b919da20f82a547aa9bc044a71e4b79f4 |
#!/usr/bin/env python2
#
# Copyright (C) 2013-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# -*- coding: utf-8 -*-
'''
# This script is an example of calculation of long range interactions (Coulomb interaction) using
# the Ewald summation method.
#
# The initial data file is 'ini_struct_deserno.dat'. It contains initial information about
# the system: the box size, particle id, position and charge.
#
# File 'deserno_ewald.dat' contains results which were obtained by Markus Deserno for exactly
# the same system. Ewald parameter (alpha = 1.112583061), Cutoff in R space (rspacecutoff = 4.9)
# cutoff in K space (kspacecutoff = 30). It compares the results of Markus Deserno with Espresso++
# implementation. The forces which affect the particles, total energy and the differences between
# Deserno's results and Espresso++ implementation are printed at the end of the script.
'''
# this is an auxiliary function. It reads the reference results of Deserno
# from "deserno_ewald.dat"
def readingDesernoForcesFile(fname="deserno_ewald.dat"):
    """Read Deserno's reference Ewald results from *fname*.

    The file layout is fixed: line index 6 (0-based) carries the total
    electrostatic energy as its first whitespace-separated token, and every
    line from index 9 onwards carries one force vector written as
    ``{fx fy fz}``.

    Parameters
    ----------
    fname : str
        Path of the reference data file. Defaults to the original
        "deserno_ewald.dat" so existing callers are unaffected.

    Returns
    -------
    tuple : (energy, fx, fy, fz) where energy is a float and fx, fy, fz
        are lists of floats (one entry per particle).
    """
    # force components x, y, z
    fx, fy, fz = [], [], []
    # total electrostatic energy
    energy = 0.0
    # context manager guarantees the handle is closed (the original left the
    # file open and shadowed the builtin name 'file')
    with open(fname) as infile:
        for i, line in enumerate(infile):
            if i == 6:
                # energy line: first token only
                energy = float(line.split()[0])
            elif i >= 9:
                # force line: strip the surrounding curly braces, then split
                tokens = line.replace('{', '').replace('}', '').split()
                fx.append(float(tokens[0]))
                fy.append(float(tokens[1]))
                fz.append(float(tokens[2]))
    return energy, fx, fy, fz
# end of the function readingDesernoForcesFile
# The script itself (Python 2: uses print statements).
import sys
import mpi4py.MPI as MPI
import espressopp
from espressopp import Real3D
from espressopp.tools import espresso_old
# reading the particle coordinates, charges and box size from old espressopp data file
# file 'ini_struct_deserno.dat' contains the data we need
print "Reading system data:"
Lx, Ly, Lz, x, y, z, type, q, vx,vy,vz,fx,fy,fz,bondpairs = espresso_old.read('ini_struct_deserno.dat')
# creating the system box
box = (Lx, Ly, Lz)
print "System box size:", box
# number of particles
num_particles = len(x)
print "Number of particles = ", num_particles
print "The first particle has coordinates", x[0], y[0], z[0]
'''
# Ewald method suppose to calculate electrostatic interaction dividing it into R space and
# K space part
#
# alpha - Ewald parameter
# rspacecutoff - the cutoff in real space
# kspacecutoff - the cutoff in reciprocal space
'''
# These values must match Deserno's reference run (see module docstring).
alpha = 1.112583061
rspacecutoff = 4.9
kspacecutoff = 30
# seting the skin for Verlet list (it is not important here)
skin = 0.09
# Coulomb prefactor parameters
bjerrumlength = 1.0
temperature = 1.0
coulomb_prefactor = bjerrumlength * temperature
# domain decomposition over the available MPI ranks
nodeGrid = espressopp.tools.decomp.nodeGrid(MPI.COMM_WORLD.size,box,rspacecutoff,skin)
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, rspacecutoff, skin)
system = espressopp.System()
system.rng = espressopp.esutil.RNG()
system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
system.skin = skin
system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)
# Adding the particles
props = ['id', 'pos', 'type', 'q']
new_particles = []
for i in range(0, num_particles):
    part = [ i, Real3D(x[i], y[i], z[i]), type[i], q[i] ]
    new_particles.append(part)
system.storage.addParticles(new_particles, *props)
system.storage.decompose()
## potential and interaction ##
# setting the Verlet list
vl = espressopp.VerletList(system, rspacecutoff+skin)
# the R space part of electrostatic interaction according to the Ewald method
'''
Creating the Coulomb potential which is responsible for the R space part according to the
Ewald method.
It is based on the Coulomb prefactor (coulomb_prefactor), Ewald parameter (alpha),
and the cutoff in R space (rspacecutoff)
'''
coulombR_pot = espressopp.interaction.CoulombRSpace(coulomb_prefactor, alpha, rspacecutoff)
# creating the interaction based on the Verlet list
coulombR_int = espressopp.interaction.VerletListCoulombRSpace(vl)
# setting the potential for the interaction between particles of type 0 and 0
coulombR_int.setPotential(type1=0, type2=0, potential = coulombR_pot)
# adding the interaction to the system
system.addInteraction(coulombR_int)
# k space part of electrostatic interaction according to the Ewald method
'''
Creating the Coulomb potential which is responsible for the K space part according to the
Ewald method.
It is based on the system information (system), Coulomb prefactor (coulomb_prefactor),
Ewald parameter (alpha), and the cutoff in K space (kspacecutoff)
'''
ewaldK_pot = espressopp.interaction.CoulombKSpaceEwald(system, coulomb_prefactor, alpha, kspacecutoff)
# creating the interaction based on the Cell list for all particle interaction and potential in K space
ewaldK_int = espressopp.interaction.CellListCoulombKSpaceEwald(system.storage, ewaldK_pot)
# adding the interaction to the system
system.addInteraction(ewaldK_int)
# creating the integrator which based on the Verlet algorithm
integrator = espressopp.integrator.VelocityVerlet(system)
# seting the time step (it is not important here)
integrator.dt = 0.0001
# nothing will be changed in system, just forces will be calculated ones
integrator.run(0)
# reading Deserno results (energy and forces)
energy_Deserno, forceX_Deserno, forceY_Deserno, forceZ_Deserno = readingDesernoForcesFile()
# printing the particle id, force (x,y,z), and force difference (x,y,z)
format0 = '\n %45s %105s \n'
print (format0 % ('forces', 'the difference between Deserno\'s result and forces by Espresso++'))
format1 = '%3s %20s %20s %20s %10s %20s %25s %25s\n'
print (format1 % ('id', 'fx', 'fy', 'fz', ' ', 'dfx', 'dfy', 'dfz'))
format2 = '%3d %3s %3.17f %3s %3.17f %3s %3.17f %10s %3.17f %3s %3.17f %3s %3.17f'
for j in range(0, num_particles):
    # per-particle: computed force, then absolute deviation from the reference
    print (format2 % (j, ' ', \
        system.storage.getParticle(j).f.x, ' ', \
        system.storage.getParticle(j).f.y, ' ', \
        system.storage.getParticle(j).f.z, \
        ' ', \
        abs(system.storage.getParticle(j).f.x-forceX_Deserno[j]), ' ', \
        abs(system.storage.getParticle(j).f.y-forceY_Deserno[j]), ' ', \
        abs(system.storage.getParticle(j).f.z-forceZ_Deserno[j])) )
# calculating the R space part of electrostatic energy
enR = coulombR_int.computeEnergy()
# calculating the K space part of electrostatic energy
enK = ewaldK_int.computeEnergy()
# total energy
enTot = enR + enK
# printing the total energy and the difference with Deserno results
print '\nTotal energy: %5.16f; The difference in energy (Deserno\'s result, Espresso++): %5.16f\n' % (enTot, enTot-energy_Deserno)
sys.exit()
| kkreis/espressopp | testsuite/ewald/ewald_eppDeserno_comparison.py | Python | gpl-3.0 | 7,543 | [
"ESPResSo"
] | eff709ab89d89e824fa35e37199292032c53c53347687334fd3a1d161870817c |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple peak fitting utility with PyMCA/SILX
===========================================
Current fitting backends: PyMca_ or SILX_
.. _PyMca: https://github.com/vasole/pymca
.. _SILX: https://github.com/silx-kit/silx
"""
from sloth.gui.jupyx import run_from_ipython
import os
import numpy as np
HAS_SILX = False
try:
from silx.math.fit import fittheories, bgtheories
from silx.math.fit.fitmanager import FitManager
HAS_SILX = True
except ImportError:
pass
HAS_PYMCA = False
HAS_PYMCA5 = False
try:
from PyMca5.PyMcaMath.fitting import Specfit, SpecfitFunctions
HAS_PYMCA5 = True
except ImportError:
try:
from PyMca import Specfit, SpecfitFunctions
HAS_PYMCA = True
except ImportError:
from sloth import NullClass
Specfit = NullClass
SpecfitFunctions = NullClass
pass
IN_IPYTHON = run_from_ipython()
##############
# SILX BASED #
##############
def fit_silx(x, y, theory=None, bkg=None):
    """Fit a single peak in (x, y) with the silx fitting backend.

    Parameters
    ----------
    x, y : 1D data arrays
    theory : str or None
        Name of a silx fit theory (e.g. 'Gaussians', 'Lorentz',
        'Pseudo-Voigt Line', 'Split Gaussian', 'Hypermet', ...).
        If None, the list of available theories is printed and the
        input *y* is returned unchanged.
    bkg : str or None
        Name of a silx background theory (e.g. 'No Background',
        'Constant', 'Linear', 'Strip', 'Snip', ...).
        If None, the list of available backgrounds is printed and the
        input *y* is returned unchanged.

    Returns
    -------
    yfit : array like *x* -- the fitted model, or *y* itself when the
        arguments are incomplete or the fit fails.
    """
    manager = FitManager()
    # load the stock theories first so that the error messages below can
    # list the valid choices
    manager.loadtheories(fittheories)
    manager.loadbgtheories(bgtheories)
    manager.setdata(x=x, y=y)
    result = y
    missing_args = False
    if theory is None:
        print('fit theory not given! choose one of the following:')
        print('\n'.join(map(str, manager.theories.keys())))
        missing_args = True
    if bkg is None:
        print('fit background not given! choose one of the following:')
        print('\n'.join(map(str, manager.bgtheories.keys())))
        missing_args = True
    if missing_args:
        return result
    manager.settheory(theory)
    manager.setbackground(bkg)
    # best-effort: on any failure keep the input data as the "fit"
    try:
        manager.estimate()
        manager.runfit()
        result = manager.gendata()
    except Exception:
        print('ERROR: fit_silx FAILED!!!')
    #print('FWHM: {0}'.format(fwhm(x,result,method='bin')))
    return result
###############
# PYMCA BASED #
###############
def fit_splitpvoigt(x, y, dy=False,
                    theory='Split Pseudo-Voigt', bkg='Constant',
                    conf=None, npeaks=1,
                    show_infos=True, show_res=True, plot=True, **kws):
    """simple wrapper to PyMca.Specfit

    the goal is to fit (automagically) a set of 1D data (x,y) with an
    asymmetric PseudoVoigt (splitpvoigt) function plus a constant
    background

    Parameters
    ----------
    x, y : data arrays
    dy : boolean or float, False
        error bar on y. If dy==True: dy=np.sqrt(y) or give an
        explicit array
    theory : str, ['Split Pseudo-Voigt',
                   'Gaussians',
                   'Lorentz',
                   'Area Gaussians',
                   'Area Lorentz',
                   'Pseudo-Voigt Line',
                   'Area Pseudo-Voigt',
                   'Split Gaussian',
                   'Split Lorentz',
                   'Step Down',
                   'Step Up',
                   'Slit',
                   'Atan',
                   'Hypermet',
                   'Periodic Gaussians']
        NOTE: whatever is passed here is currently overridden below and
        forced to 'Split Pseudo-Voigt'.
    bkg : str, ['Constant', 'No Background', 'Linear', 'Internal']
    conf : dictionary, None
        to tune Specfit configuration, default:
        'FwhmPoints' : int(len(y)/5.
        'Sensitivity' : 5.
        'EtaFlag' : 1 (force eta between 0 and 1)
        'WeightFlag' : 0 (do not weight by noise)
        'AutoScaling' : 1 (auto scale y)
        'AutoFwhm' : 1 (auto guess fwhm)
    npeaks : int, 1
        limit the number of split-PseudoVoigt peaks to guess
    show_infos: boolean, True
        print information on the peakfit setup
    show_res : boolean, True
        print fit results to standard output
    plot : boolean, True
        plot data, fit and residual with PyMca (ScanWindow)
    **kws : currently unused

    Returns
    -------
    PyMca.Specfit.Specfit, PyMca.ScanWindow.ScanWindow (None if plot=False)
    """
    # default fit configuration
    fwhmpts_guess = int(len(y) / 10.)  # guess 1/10 of points resides in fwhm
    iflat = int(len(y) / 5.)  # guess 1/5 of points are flat or out of peak
    # sensitivity guess: sum of the mean levels of the two flat edges
    sens_guess = np.mean(y[:iflat]) + np.mean(y[-iflat:])
    dconf = {'FwhmPoints': fwhmpts_guess,  # fwhm points
             'Sensitivity': max(5., sens_guess),  # sensitivity
             'Yscaling': 1.0,  # Y factor
             'ForcePeakPresence': 1,  # 1 force peak presence
             'HeightAreaFlag': 1,  # 1 force positive Height/Area
             'PositionFlag': 1,  # 1 force position in interval
             'PosFwhmFlag': 1,  # 1 force positive FWHM
             'SameFwhmFlag': 0,  # 1 force same FWHM
             'EtaFlag': 1,  # 1 to force Eta between 0 and 1
             'NoConstrainsFlag': 0,  # 1 ignore Restrains
             'WeightFlag': 0,
             'AutoScaling': 0,
             'AutoFwhm': 0}
    # force update config of bkg and theory
    dconf.update({'fitbkg': bkg,
                  'fittheory': theory})
    if conf is not None:
        dconf.update(conf)
    if show_infos:
        print('{0:=^64}'.format('FIT INFOS'))
    # init Specfit object
    fit = Specfit.Specfit()
    # set the data (with, without, or with explicit error bars on y)
    if dy is True:
        dy = np.sqrt(y)
        fit.setdata(x=x, y=y, sigmay=dy)
    elif dy is False:
        fit.setdata(x=x, y=y)
    else:
        fit.setdata(x=x, y=y, sigmay=dy)
    # initialize fitting functions
    if not len(fit.theorylist):
        funsFile = "SpecfitFunctions.py"
        if not os.path.exists(funsFile):
            funsFile = os.path.join(os.path.dirname(Specfit.__file__), funsFile)
        fit.importfun(funsFile)
    # limit number of estimated peaks
    def _estimate_splitpvoigt2(xx, yy, zzz, xscaling=1.0, yscaling=None, npeaks=npeaks):
        """wrap to SpecfitFunctions.estimate_splitpvoigt to limit to npeaks

        NOTE: each split-PseudoVoigt peak has 5 parameters, hence the
        slicing to the first 5*npeaks entries.
        """
        currpars, currcons = sff.estimate_splitpvoigt(xx, yy, zzz, xscaling, yscaling)
        #print(currpars)
        #print(currcons)
        newpars = currpars[:5 * npeaks]
        newcons = currcons[:][:5 * npeaks]
        return newpars, newcons
    # force Split Pseudo-Voigt estimate to a single peak
    sff = SpecfitFunctions.SpecfitFunctions()
    conf_fun = sff.configure(**dconf)
    fit.addtheory('Split Pseudo-Voigt', sff.splitpvoigt, ['Height', 'Position', 'LowFWHM', 'HighFWHM', 'Eta'], _estimate_splitpvoigt2)
    # NOTE(review): any caller-supplied 'theory' is overridden here
    theory = 'Split Pseudo-Voigt'
    if show_infos:
        print('backgroung: {0}'.format(bkg))
        print('theory: {0} {1}'.format(npeaks, theory))
    # update configuration
    fit_conf = fit.configure(**dconf)
    # set theory and bkg
    fit.settheory(theory)
    fit.setbackground(bkg)
    # automatic estimate and fit
    fit_est = fit.estimate()
    fit.startfit()
    # RESULTS
    yfit = fit.gendata(x=x, parameters=fit.paramlist)
    residual = y - yfit
    # outputs: attach derived quantities to the Specfit object itself
    pk_area = np.trapz(yfit, x=x)
    fit.resdict = fit_results(fit, output='dict', pk_info=True)
    fit.resdict.update({'area': pk_area})
    fit.yfit = yfit
    fit.residual = residual
    # print results
    if show_res is True:
        fit_results(fit, output='print', pk_info=True)
    # plot
    if plot is True:
        if HAS_PYMCA5:
            from PyMca5.PyMcaGui import ScanWindow
        elif HAS_PYMCA:
            from PyMca import ScanWindow
        else:
            # no plotting backend available: early return, second element is 0
            return fit, 0
        if (not IN_IPYTHON):
            # outside IPython a Qt application must be created explicitly
            from PyMca import PyMcaQt as qt
            qtApp = qt.QApplication([])
        pw = ScanWindow.ScanWindow()
        pw.setGeometry(50, 50, 800, 800)
        pw.addCurve(x, y, legend='data', replace=True)
        pw.addCurve(x, yfit, legend='yfit', replace=False)
        pw.addCurve(x, residual, legend='residual', replace=False)
        pw.show()
        if (not IN_IPYTHON):
            qtApp.exec_()
    if plot is True:
        return fit, pw
    else:
        return fit, None
def fit_results(fitobj, output='print', pk_info=True):
    """Report fit results from a Specfit object.

    Parameters
    ----------
    fitobj : Specfit-like object
        must expose .chisq, .paramlist (list of dicts with 'name',
        'fitresult', 'sigma', 'estimation'), .bkgfun and .ydata
    output : str, 'print'
        'print' -> print the report, return None
        'list'  -> return the report as a list of lines
        'dict'  -> return the peak-info dictionary
                   (empty dict when pk_info is False)
        anything else -> return the report as a single string
    pk_info : boolean, True
        if True, append derived peak information (height, position, FWHM)

    Returns
    -------
    None, list of str, dict or str depending on *output*
    """
    # templates
    tmpl_head = '{0:=^64}'
    tmpl_parhead = '{0:<4} {1:=^12} {2:=^15} {3:=^13} ({4:=^14})'
    tmpl_parlist = '{0:<4} {1:<12} {2:< 15.5f} {3:< 13.5f} ({4:^ 14.5f})'
    tmpl_pkhead = '{0:<4} {1:=^12} {2:=^14} {3:=^12} ({4:=^14})'
    tmpl_pkinfo = '{0:<4} {1:< 12.5f} {2:< 15.5f} {3:< 13.5f} {4:^ 14.5f}'
    # HEADER
    out = [tmpl_head.format(' FIT RESULTS ')]
    # STATISTICS
    out.append(tmpl_head.format(' STATISTICS '))
    out.append('chi_squared = {0}'.format(fitobj.chisq))
    # FITTED PARAMETERS
    out.append(tmpl_head.format(' FITTED PARAMETERS '))
    out.append(tmpl_parhead.format('#idx', ' name ', ' fitresult ', ' sigma ', ' estimation '))
    for idx, d in enumerate(fitobj.paramlist):
        out.append(tmpl_parlist.format(idx, d['name'], d['fitresult'], d['sigma'], d['estimation']))
    # PEAK INFO
    # initialized here so that output='dict' with pk_info=False returns {}
    # instead of raising NameError (bug in the previous version)
    resdict = {}
    if pk_info:
        out.append(tmpl_head.format(' PEAK INFO '))
        bkg = fitobj.bkgfun.__str__().lower()
        # ioff = number of background parameters preceding the peak
        # parameters in paramlist; it depends on the background type
        if 'none' in bkg:
            ioff = 0
        elif 'constant' in bkg:
            ioff = 1
        elif 'linear' in bkg:
            ioff = 2
        elif 'internal' in bkg:
            ioff = 3
        else:
            # unrecognized background: assume no background parameters
            # (the previous version left 'ioff' undefined -> NameError)
            ioff = 0
        pk_height = fitobj.paramlist[ioff]['fitresult'] - np.min(fitobj.ydata)
        pk_pos = fitobj.paramlist[ioff + 1]['fitresult']
        # mean of the low-side and high-side half widths
        pk_fwhm = (fitobj.paramlist[ioff + 2]['fitresult'] / 2.) + (fitobj.paramlist[ioff + 3]['fitresult'] / 2.)
        # center of the FWHM interval (differs from pk_pos for split peaks)
        pk_cfwhm = pk_pos + (pk_fwhm / 2. - (fitobj.paramlist[ioff + 2]['fitresult'] / 2.))
        # collect peak infos in a dictionary
        resdict = {'height': pk_height,
                   'position': pk_pos,
                   'fwhm': pk_fwhm,
                   'cfwhm': pk_cfwhm}
        out.append(tmpl_pkhead.format('#', ' hght-min ', ' position ', ' FHWM ', ' cen_FWHM '))
        out.append(tmpl_pkinfo.format('1', pk_height, pk_pos, pk_fwhm, pk_cfwhm))
    # OUTPUT
    if ('print' in output.lower()):
        print('\n'.join(out))
    elif ('list' in output.lower()):
        return out
    elif ('dict' in output.lower()):
        return resdict
    else:
        return '\n'.join(out)
if __name__ == '__main__':
pass
# TESTS are in examples/peakfit_tests.py
| maurov/xraysloth | sloth/fit/peakfit_silx.py | Python | bsd-3-clause | 11,844 | [
"Gaussian"
] | b54346be38f9d1edbeedf12a853b8e94976a70e4a7a367236f794f420059afc0 |
# -*- coding: utf-8 -*-
#
# Copyright 2008 - 2014 Brian R. D'Urso
#
# This file is part of Python Instrument Control System, also known as Pythics.
#
# Pythics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pythics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pythics. If not, see <http://www.gnu.org/licenses/>.
#
#
# load libraries
#
import time
#from PyQt4 import QtCore, QtGui
from pythics.settings import _TRY_PYSIDE
try:
if not _TRY_PYSIDE:
raise ImportError()
import PySide.QtCore as _QtCore
import PySide.QtGui as _QtGui
QtCore = _QtCore
QtGui = _QtGui
Signal = QtCore.Signal
USES_PYSIDE = True
except ImportError:
import sip
sip.setapi('QString', 2)
sip.setapi('QVariant', 2)
import PyQt4.QtCore as _QtCore
import PyQt4.QtGui as _QtGui
QtCore = _QtCore
QtGui = _QtGui
Signal = QtCore.pyqtSignal
USES_PYSIDE = False
class ImageLabel(QtGui.QWidget):
    """Widget that displays a QImage, optionally scaled or fit to the widget,
    forwarding mouse events (in image coordinates) to a controlling object.

    The *control* object must provide _mouse_press_left/right,
    _mouse_release_left/right and _mouse_double_click_left/right(x, y).
    """

    def __init__(self, control, *args):
        QtGui.QWidget.__init__(self, *args)
        self.control = control
        self.image = None
        # drawn size in widget pixels (updated on setScale/setImage)
        self.draw_w = 1
        self.draw_h = 1
        # Ignored policy: the widget does not impose a preferred size
        size_policy = QtGui.QSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Ignored)
        self.setSizePolicy(size_policy)

    def setScale(self, scale):
        """Set the fixed zoom factor (used when fit mode is off)."""
        self.scale = scale
        if self.image is not None:
            self.draw_w = self.scale*self.image_w
            self.draw_h = self.scale*self.image_h
            if not self.fit:
                self.setMinimumSize(self.draw_w, self.draw_h)
        self.repaint()

    def setFit(self, fit):
        """Enable/disable fit-to-widget mode; adjusts the minimum size."""
        self.fit = fit
        if fit:
            self.setMinimumSize(1, 1)
        else:
            self.setMinimumSize(self.draw_w, self.draw_h)
        self.repaint()

    def setImage(self, image):
        """Replace the displayed QImage and recompute the drawn size."""
        self.image = image
        self.image_w = image.width()
        self.image_h = image.height()
        self.draw_w = self.scale*self.image_w
        self.draw_h = self.scale*self.image_h
        if self.fit:
            self.setMinimumSize(1,1)
        else:
            self.setMinimumSize(self.draw_w, self.draw_h)
        self.repaint()

    def paintEvent(self, event):
        # paints the image centered, either fit to the widget or at the
        # fixed scale; remembers scale/offset for mouse coordinate mapping
        if self.image is not None:
            painter = QtGui.QPainter(self)
            if self.fit:
                # largest scale that keeps the whole image visible
                scale = min(self.width()/float(self.image_w),
                            self.height()/float(self.image_h))
                x_offset = max(int((self.width()-self.image_w*scale)/2.0), 0)
                y_offset = max(int((self.height()-self.image_h*scale)/2.0), 0)
            else:
                x_offset = max(int((self.width()-self.draw_w)/2.0), 0)
                y_offset = max(int((self.height()-self.draw_h)/2.0), 0)
                scale = self.scale
            painter.save()
            painter.translate(x_offset, y_offset)
            painter.scale(scale, scale)
            painter.drawImage(0, 0, self.image)
            painter.restore()
            # cache for mouse event mapping (widget -> image coordinates)
            self.scale = scale
            self.x_offset = x_offset
            self.y_offset = y_offset

    def mousePressEvent(self, event):
        # map to image coordinates before forwarding to the controller
        x = int((event.x()-self.x_offset)/float(self.scale))
        y = int((event.y()-self.y_offset)/float(self.scale))
        if event.button() == QtCore.Qt.LeftButton:
            self.control._mouse_press_left(x, y)
        elif event.button() == QtCore.Qt.RightButton:
            self.control._mouse_press_right(x, y)
        else:
            # pass on other buttons to base class
            QtGui.QWidget.mousePressEvent(self, event)

    def mouseReleaseEvent(self, event):
        x = int((event.x()-self.x_offset)/float(self.scale))
        y = int((event.y()-self.y_offset)/float(self.scale))
        if event.button() == QtCore.Qt.LeftButton:
            self.control._mouse_release_left(x, y)
        elif event.button() == QtCore.Qt.RightButton:
            self.control._mouse_release_right(x, y)
        else:
            # pass on other buttons to base class
            QtGui.QWidget.mouseReleaseEvent(self, event)

    def mouseDoubleClickEvent(self, event):
        x = int((event.x()-self.x_offset)/float(self.scale))
        y = int((event.y()-self.y_offset)/float(self.scale))
        if event.button() == QtCore.Qt.LeftButton:
            self.control._mouse_double_click_left(x, y)
        elif event.button() == QtCore.Qt.RightButton:
            self.control._mouse_double_click_right(x, y)
        else:
            # pass on other buttons to base class
            QtGui.QWidget.mouseDoubleClickEvent(self, event)
#
# multiturn rotatable control to select a value from a set of choices
#
class Knob(QtGui.QDial):
    """Multiturn rotatable dial that selects a value from a list of choices.

    The dial position wraps (multiturn); turning speed accelerates index
    changes. Emits choiceChanged when the selected choice changes (on every
    change if tracking=True, otherwise on mouse/key/wheel release).
    """
    # NOTE(review): declared as Signal(int) but emit() is called with
    # getChoiceValue(), which is whatever type the choices hold -- verify.
    choiceChanged = Signal(int)

    def __init__(self, steps=50, choices=['None'], acceleration=5.0, tracking=False, wrapping=False):
        # NOTE(review): mutable default 'choices' is shared across instances;
        # harmless here since the class never mutates it, only reads/indexes.
        QtGui.QDial.__init__(self)
        self.setNotchesVisible(True)
        self.setStyle(QtGui.QStyleFactory.create('plastique'))
        self.steps = steps                 # dial positions per revolution
        self.choices = choices             # selectable values
        self.choice_tracking = tracking    # emit on every change vs. on release
        self.index_wrapping = wrapping     # wrap the choice index around
        self.setWrapping(True)             # dial itself always wraps (multiturn)
        self.setTracking(True)
        self.last_position = 0
        self.last_index = 0
        self.setRange(0, self.steps)
        self.last_time = time.time()       # used for turn-speed acceleration
        self.acceleration = acceleration

    def sliderChange(self, change):
        # translate raw dial movement into choice-index movement, with
        # acceleration proportional to turning speed
        if change == 3:  # QtGui.QAbstractSlider.SliderChange
            new_position = self.value()
            new_time = time.time()
            # movement measured both ways around the dial (modulo steps)
            ccw_change = (self.last_position - new_position) % self.steps
            cw_change = (new_position - self.last_position) % self.steps
            if ccw_change < cw_change:
                # knob was turned CCW
                accel = int(ccw_change*self.acceleration/((abs(new_time-self.last_time)+0.001)*self.steps))
                if self.index_wrapping:
                    new_index = (self.last_index - ccw_change - accel) % len(self.choices)
                else:
                    new_index = max(0, self.last_index - ccw_change - accel)
            else:
                # knob was turned CW
                accel = int(cw_change*self.acceleration/((abs(new_time-self.last_time)+0.001)*self.steps))
                if self.index_wrapping:
                    new_index = (self.last_index + cw_change + accel) % len(self.choices)
                else:
                    new_index = min(len(self.choices)-1, self.last_index + cw_change + accel)
            self.last_position = new_position
            if new_index != self.last_index:
                self.last_index = new_index
                if self.choice_tracking:
                    self.choiceChanged.emit(self.getChoiceValue())
            self.last_time = new_time
        QtGui.QDial.sliderChange(self, change)

    def mouseReleaseEvent(self, event):
        # non-tracking mode: report the (possibly changed) choice on release
        if not self.choice_tracking:
            self.choiceChanged.emit(self.getChoiceValue())
        QtGui.QDial.mouseReleaseEvent(self, event)

    def keyPressEvent(self, event):
        if not self.choice_tracking:
            self.choiceChanged.emit(self.getChoiceValue())
        QtGui.QDial.keyPressEvent(self, event)

    def wheelEvent(self, event):
        if not self.choice_tracking:
            self.choiceChanged.emit(self.getChoiceValue())
        QtGui.QDial.wheelEvent(self, event)

    def setChoices(self, choices):
        """Replace the list of selectable values."""
        self.choices = choices

    def getChoices(self):
        """Return the list of selectable values."""
        return self.choices

    def setChoiceValue(self, value):
        """Select *value* (must be present in choices) and repaint."""
        self.last_index = self.choices.index(value)
        # request repaint
        self.update()

    def getChoiceValue(self):
        """Return the currently selected value."""
        return self.choices[self.last_index]

    def paintEvent(self,event):
        # draw the dial, then overlay the current choice as centered text
        QtGui.QDial.paintEvent(self,event)
        painter = QtGui.QPainter(self)
        painter.setPen(QtGui.QPen(self.palette().color(QtGui.QPalette.Text), 1))
        painter.drawText(self.rect(), QtCore.Qt.AlignCenter, str(self.getChoiceValue()));
#
# floating point NumBox control
#
class ScientificDoubleSpinBox(QtGui.QDoubleSpinBox):
    """QDoubleSpinBox that displays its value with a printf-style format
    (default '%g', i.e. compact scientific/plain notation)."""

    def __init__(self, parent=None, format_str='%g', *args, **kwargs):
        QtGui.QDoubleSpinBox.__init__(self, parent, *args, **kwargs)
        self.format_str = format_str
        self.validator = QtGui.QDoubleValidator()

    def textFromValue(self, value):
        # format the numeric value for display using the configured format
        return self.format_str % value

    def validate(self, text, pos):
        # delegate keystroke validation to a QDoubleValidator
        return self.validator.validate(text, pos)

    def valueFromText(self, text):
        # parse whatever the user typed back into a float
        return float(text)
#
# Python shell widget for Shell control
#
class Shell(QtGui.QPlainTextEdit):
    """Interactive shell widget: a plain-text edit with a prompt, command
    history (up/down arrows) and a callback that executes entered commands.

    NOTE(review): getCommand() uses the builtin 'unicode', so this class is
    Python 2 only as written.
    """

    def __init__(self, push_func, prompt='$> ', parent=None, font='Consolas', font_size=10, *args, **kwargs):
        QtGui.QPlainTextEdit.__init__(self, parent, *args, **kwargs)
        self.prompt = prompt
        self.history = []
        self.setWordWrapMode(QtGui.QTextOption.WrapAnywhere)
        self.setUndoRedoEnabled(False)
        self.document().setDefaultFont(QtGui.QFont(font, font_size, QtGui.QFont.Normal))
        # callback invoked with the command string when the user hits Enter
        self.push = push_func

    def newPrompt(self):
        """Append a fresh prompt line and move the cursor after it."""
        self.appendPlainText(self.prompt)
        self.moveCursor(QtGui.QTextCursor.End)

    def continuePrompt(self):
        """Append a continuation prompt (dots) for multi-line input."""
        self.appendPlainText('.' * len(self.prompt))
        self.moveCursor(QtGui.QTextCursor.End)

    def getCommand(self):
        """Return the text of the current command line (prompt stripped)."""
        doc = self.document()
        curr_line = unicode(doc.findBlockByLineNumber(doc.lineCount() - 1).text())
        curr_line = curr_line.rstrip()
        curr_line = curr_line[len(self.prompt):]
        return curr_line

    def setCommand(self, command):
        """Replace the current command line with *command*."""
        if self.getCommand() == command:
            return
        # select everything after the prompt on the last line and replace it
        self.moveCursor(QtGui.QTextCursor.End)
        self.moveCursor(QtGui.QTextCursor.StartOfLine, QtGui.QTextCursor.KeepAnchor)
        for i in range(len(self.prompt)):
            self.moveCursor(QtGui.QTextCursor.Right, QtGui.QTextCursor.KeepAnchor)
        self.textCursor().removeSelectedText()
        self.textCursor().insertText(command)
        self.moveCursor(QtGui.QTextCursor.End)

    def addToHistory(self, command):
        """Append *command* to the history (skipping empties and repeats)."""
        if command and (not self.history or self.history[-1] != command):
            self.history.append(command)
        self.history_index = len(self.history)

    def getPrevHistoryEntry(self):
        """Step backwards through the history; '' when history is empty."""
        if self.history:
            self.history_index = max(0, self.history_index - 1)
            return self.history[self.history_index]
        return ''

    def getNextHistoryEntry(self):
        """Step forwards through the history; '' past the newest entry."""
        if self.history:
            hist_len = len(self.history)
            self.history_index = min(hist_len, self.history_index + 1)
            if self.history_index < hist_len:
                return self.history[self.history_index]
        return ''

    def getCursorPosition(self):
        """Cursor column relative to the end of the prompt."""
        return self.textCursor().columnNumber() - len(self.prompt)

    def setCursorPosition(self, position):
        """Place the cursor at *position* (relative to the prompt end)."""
        self.moveCursor(QtGui.QTextCursor.StartOfLine)
        for i in range(len(self.prompt) + position):
            self.moveCursor(QtGui.QTextCursor.Right)

    def runCommand(self):
        """Record the current line in history and execute it via push()."""
        command = self.getCommand()
        self.addToHistory(command)
        self.push(command)

    def keyPressEvent(self, event):
        # Enter runs the command; Home/arrows are remapped so the user
        # cannot move into or delete the prompt; Ctrl-D closes the shell.
        if event.key() in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return):
            self.runCommand()
            return
        if event.key() == QtCore.Qt.Key_Home:
            self.setCursorPosition(0)
            return
        if event.key() == QtCore.Qt.Key_PageUp:
            return
        elif event.key() in (QtCore.Qt.Key_Left, QtCore.Qt.Key_Backspace):
            # block moving/deleting into the prompt
            if self.getCursorPosition() == 0:
                return
        elif event.key() == QtCore.Qt.Key_Up:
            self.setCommand(self.getPrevHistoryEntry())
            return
        elif event.key() == QtCore.Qt.Key_Down:
            self.setCommand(self.getNextHistoryEntry())
            return
        elif event.key() == QtCore.Qt.Key_D and event.modifiers() == QtCore.Qt.ControlModifier:
            self.close()
        super(Shell, self).keyPressEvent(event)
| LunarLanding/Pythics | pythics/control_helpers.py | Python | gpl-3.0 | 12,528 | [
"Brian"
] | 74bbbda57743098551bf143cdfe94b5700122d1c8eea544b72ff33a7e9d1a3a5 |
"""
Acceptance tests for Content Libraries in Studio
"""
from ddt import ddt, data
from nose.plugins.attrib import attr
from flaky import flaky
from .base_studio_test import StudioLibraryTest
from ...fixtures.course import XBlockFixtureDesc
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.utils import add_component
from ...pages.studio.library import LibraryEditPage
from ...pages.studio.users import LibraryUsersPage
@attr('shard_2')
@ddt
class LibraryEditPageTest(StudioLibraryTest):
"""
Test the functionality of the library edit page.
"""
    def setUp(self):
        """
        Ensure a library exists and navigate to the library edit page.
        """
        super(LibraryEditPageTest, self).setUp()
        # page object for the library created by StudioLibraryTest.setUp
        self.lib_page = LibraryEditPage(self.browser, self.library_key)
        self.lib_page.visit()
        self.lib_page.wait_until_ready()
    def test_page_header(self):
        """
        Scenario: Ensure that the library's name is displayed in the header and title.
        Given I have a library in Studio
        And I navigate to Library Page in Studio
        Then I can see library name in page header title
        And I can see library name in browser page title
        """
        # display_name comes from the library fixture created in setUp
        self.assertIn(self.library_info['display_name'], self.lib_page.get_header_title())
        self.assertIn(self.library_info['display_name'], self.browser.title)
    def test_add_duplicate_delete_actions(self):
        """
        Scenario: Ensure that we can add an HTML block, duplicate it, then delete the original.
        Given I have a library in Studio with no XBlocks
        And I navigate to Library Page in Studio
        Then there are no XBlocks displayed
        When I add Text XBlock
        Then one XBlock is displayed
        When I duplicate first XBlock
        Then two XBlocks are displayed
        And those XBlocks locators' are different
        When I delete first XBlock
        Then one XBlock is displayed
        And displayed XBlock are second one
        """
        self.assertEqual(len(self.lib_page.xblocks), 0)
        # Create a new block:
        add_component(self.lib_page, "html", "Text")
        self.assertEqual(len(self.lib_page.xblocks), 1)
        first_block_id = self.lib_page.xblocks[0].locator
        # Duplicate the block:
        self.lib_page.click_duplicate_button(first_block_id)
        self.assertEqual(len(self.lib_page.xblocks), 2)
        second_block_id = self.lib_page.xblocks[1].locator
        # the duplicate must be a distinct block, not the same locator
        self.assertNotEqual(first_block_id, second_block_id)
        # Delete the first block:
        self.lib_page.click_delete_button(first_block_id, confirm=True)
        self.assertEqual(len(self.lib_page.xblocks), 1)
        # only the duplicate survives
        self.assertEqual(self.lib_page.xblocks[0].locator, second_block_id)
    def test_no_edit_visibility_button(self):
        """
        Scenario: Ensure that library xblocks do not have 'edit visibility' buttons.
        Given I have a library in Studio with no XBlocks
        And I navigate to Library Page in Studio
        When I add Text XBlock
        Then one XBlock is displayed
        And no 'edit visibility' button is shown
        """
        add_component(self.lib_page, "html", "Text")
        # visibility editing applies to courses, not libraries
        self.assertFalse(self.lib_page.xblocks[0].has_edit_visibility_button)
    def test_add_edit_xblock(self):
        """
        Scenario: Ensure that we can add an XBlock, edit it, then see the resulting changes.
        Given I have a library in Studio with no XBlocks
        And I navigate to Library Page in Studio
        Then there are no XBlocks displayed
        When I add Multiple Choice XBlock
        Then one XBlock is displayed
        When I edit first XBlock
        And I go to basic tab
        And set it's text to a fairly trivial question about Battlestar Galactica
        And save XBlock
        Then one XBlock is displayed
        And first XBlock student content contains at least part of text I set
        """
        self.assertEqual(len(self.lib_page.xblocks), 0)
        # Create a new problem block:
        add_component(self.lib_page, "problem", "Multiple Choice")
        self.assertEqual(len(self.lib_page.xblocks), 1)
        problem_block = self.lib_page.xblocks[0]
        # Edit it:
        problem_block.edit()
        problem_block.open_basic_tab()
        # markdown-style problem definition: (x) marks the correct choice
        problem_block.set_codemirror_text(
            """
            >>Who is "Starbuck"?<<
            (x) Kara Thrace
            ( ) William Adama
            ( ) Laura Roslin
            ( ) Lee Adama
            ( ) Gaius Baltar
            """
        )
        problem_block.save_settings()
        # Check that the save worked:
        self.assertEqual(len(self.lib_page.xblocks), 1)
        # re-fetch: the page may have re-rendered the block after saving
        problem_block = self.lib_page.xblocks[0]
        self.assertIn("Laura Roslin", problem_block.student_content)
    def test_no_discussion_button(self):
        """
        Ensure the UI is not loaded for adding discussions.
        """
        # discussion components are not supported inside libraries
        self.assertFalse(self.browser.find_elements_by_css_selector('span.large-discussion-icon'))
@flaky  # TODO fix this, see TNL-2322
def test_library_pagination(self):
    """
    Scenario: Ensure that adding several XBlocks to a library results in pagination.
    Given that I have a library in Studio with no XBlocks
    And I create 10 Multiple Choice XBlocks
    Then 10 are displayed.
    When I add one more Multiple Choice XBlock
    Then 1 XBlock will be displayed
    When I delete that XBlock
    Then 10 are displayed.
    """
    self.assertEqual(len(self.lib_page.xblocks), 0)
    # Page size is 10; fill exactly one page.
    for _ in range(10):
        add_component(self.lib_page, "problem", "Multiple Choice")
    self.assertEqual(len(self.lib_page.xblocks), 10)
    # The 11th block overflows onto a new page showing only that block.
    add_component(self.lib_page, "problem", "Multiple Choice")
    self.assertEqual(len(self.lib_page.xblocks), 1)
    # Deleting it collapses pagination back to a single full page.
    self.lib_page.click_delete_button(self.lib_page.xblocks[0].locator)
    self.assertEqual(len(self.lib_page.xblocks), 10)
@data('top', 'bottom')
def test_nav_present_but_disabled(self, position):
    """
    Scenario: Ensure that the navigation buttons aren't active when there aren't enough XBlocks.
    Given that I have a library in Studio with no XBlocks
    The Navigation buttons should be disabled.
    When I add a multiple choice problem
    The Navigation buttons should be disabled.

    Runs once per ddt value: 'top' and 'bottom' button bars.
    """
    self.assertEqual(len(self.lib_page.xblocks), 0)
    self.assertTrue(self.lib_page.nav_disabled(position))
    # One block still fits on a single page, so nav stays disabled.
    add_component(self.lib_page, "problem", "Multiple Choice")
    self.assertTrue(self.lib_page.nav_disabled(position))
def test_delete_deletes_only_desired_block(self):
    """
    Scenario: Ensure that when deleting XBlock only desired XBlock is deleted
    Given that I have a library in Studio with no XBlocks
    And I create Blank Common Problem XBlock
    And I create Checkboxes XBlock
    When I delete Blank Problem XBlock
    Then Checkboxes XBlock is not deleted
    And Blank Common Problem XBlock is deleted
    """
    self.assertEqual(len(self.lib_page.xblocks), 0)
    add_component(self.lib_page, "problem", "Blank Common Problem")
    add_component(self.lib_page, "problem", "Checkboxes")
    self.assertEqual(len(self.lib_page.xblocks), 2)
    # Confirm ordering before relying on index 0 for the delete.
    self.assertIn("Blank Common Problem", self.lib_page.xblocks[0].name)
    self.assertIn("Checkboxes", self.lib_page.xblocks[1].name)
    self.lib_page.click_delete_button(self.lib_page.xblocks[0].locator)
    self.assertEqual(len(self.lib_page.xblocks), 1)
    # The surviving block must be the one we did NOT delete.
    problem_block = self.lib_page.xblocks[0]
    self.assertIn("Checkboxes", problem_block.name)
@attr('shard_2')
@ddt
class LibraryNavigationTest(StudioLibraryTest):
"""
Test common Navigation actions
"""
def setUp(self):
    """
    Ensure a library exists and navigate to the library edit page.
    """
    super(LibraryNavigationTest, self).setUp()
    self.lib_page = LibraryEditPage(self.browser, self.library_key)
    self.lib_page.visit()
    # Wait for async xblock rendering before any test interacts with the page.
    self.lib_page.wait_until_ready()
def populate_library_fixture(self, library_fixture):
    """
    Create four pages worth of XBlocks, and offset by one so each is named
    after the number they should be in line by the user's perception.
    """
    # 40 blocks named '1'..'40'; page size is 10, so this yields 4 pages.
    self.blocks = [XBlockFixtureDesc('html', str(i)) for i in xrange(1, 41)]
    library_fixture.add_children(*self.blocks)
def test_arbitrary_page_selection(self):
    """
    Scenario: I can pick a specific page number of a Library at will.
    Given that I have a library in Studio with 40 XBlocks
    When I go to the 3rd page
    The first XBlock should be the 21st XBlock
    When I go to the 4th Page
    The first XBlock should be the 31st XBlock
    When I go to the 1st page
    The first XBlock should be the 1st XBlock
    When I go to the 2nd page
    The first XBlock should be the 11th XBlock
    """
    # Pages are visited out of order on purpose, to prove direct jumps work.
    self.lib_page.go_to_page(3)
    self.assertEqual(self.lib_page.xblocks[0].name, '21')
    self.lib_page.go_to_page(4)
    self.assertEqual(self.lib_page.xblocks[0].name, '31')
    self.lib_page.go_to_page(1)
    self.assertEqual(self.lib_page.xblocks[0].name, '1')
    self.lib_page.go_to_page(2)
    self.assertEqual(self.lib_page.xblocks[0].name, '11')
def test_bogus_page_selection(self):
    """
    Scenario: I can't pick a nonsense page number of a Library
    Given that I have a library in Studio with 40 XBlocks
    When I attempt to go to the 'a'th page
    The input field will be cleared and no change of XBlocks will be made
    When I attempt to visit the -1st page
    The input field will be cleared and no change of XBlocks will be made
    When I attempt to visit the 5th page
    The input field will be cleared and no change of XBlocks will be made
    When I attempt to visit the 0th page
    The input field will be cleared and no change of XBlocks will be made
    """
    self.assertEqual(self.lib_page.xblocks[0].name, '1')
    # Non-numeric, negative, beyond-last (only 4 pages exist) and zero inputs
    # must all be rejected, leaving page 1 displayed.
    self.lib_page.go_to_page('a')
    self.assertTrue(self.lib_page.check_page_unchanged('1'))
    self.lib_page.go_to_page(-1)
    self.assertTrue(self.lib_page.check_page_unchanged('1'))
    self.lib_page.go_to_page(5)
    self.assertTrue(self.lib_page.check_page_unchanged('1'))
    self.lib_page.go_to_page(0)
    self.assertTrue(self.lib_page.check_page_unchanged('1'))
@data('top', 'bottom')
def test_nav_buttons(self, position):
    """
    Scenario: Ensure that the navigation buttons work.
    Given that I have a library in Studio with 40 XBlocks
    The previous button should be disabled.
    The first XBlock should be the 1st XBlock
    Then if I hit the next button
    The first XBlock should be the 11th XBlock
    Then if I hit the next button
    The first XBlock should be the 21st XBlock
    Then if I hit the next button
    The first XBlock should be the 31st XBlock
    And the next button should be disabled
    Then if I hit the previous button
    The first XBlock should be the 21st XBlock
    Then if I hit the previous button
    The first XBlock should be the 11th XBlock
    Then if I hit the previous button
    The first XBlock should be the 1st XBlock
    And the previous button should be disabled

    Runs once per ddt value: 'top' and 'bottom' button bars.
    """
    # Check forward navigation
    self.assertTrue(self.lib_page.nav_disabled(position, ['previous']))
    self.assertEqual(self.lib_page.xblocks[0].name, '1')
    self.lib_page.move_forward(position)
    self.assertEqual(self.lib_page.xblocks[0].name, '11')
    self.lib_page.move_forward(position)
    self.assertEqual(self.lib_page.xblocks[0].name, '21')
    self.lib_page.move_forward(position)
    self.assertEqual(self.lib_page.xblocks[0].name, '31')
    # BUG FIX: the return value of nav_disabled() was previously discarded here,
    # so "next button disabled on the last page" was never actually asserted.
    self.assertTrue(self.lib_page.nav_disabled(position, ['next']))
    # Check backward navigation
    self.lib_page.move_back(position)
    self.assertEqual(self.lib_page.xblocks[0].name, '21')
    self.lib_page.move_back(position)
    self.assertEqual(self.lib_page.xblocks[0].name, '11')
    self.lib_page.move_back(position)
    self.assertEqual(self.lib_page.xblocks[0].name, '1')
    self.assertTrue(self.lib_page.nav_disabled(position, ['previous']))
def test_library_pagination(self):
    """
    Scenario: Ensure that adding several XBlocks to a library results in pagination.
    Given that I have a library in Studio with 40 XBlocks
    Then 10 are displayed
    And the first XBlock will be the 1st one
    And I'm on the 1st page
    When I add 1 Multiple Choice XBlock
    Then 1 XBlock will be displayed
    And I'm on the 5th page
    The first XBlock will be the newest one
    When I delete that XBlock
    Then 10 are displayed
    And I'm on the 4th page
    And the first XBlock is the 31st one
    And the last XBlock is the 40th one.
    """
    self.assertEqual(len(self.lib_page.xblocks), 10)
    self.assertEqual(self.lib_page.get_page_number(), '1')
    self.assertEqual(self.lib_page.xblocks[0].name, '1')
    # Adding block #41 creates a 5th page and jumps the view to it.
    add_component(self.lib_page, "problem", "Multiple Choice")
    self.assertEqual(len(self.lib_page.xblocks), 1)
    self.assertEqual(self.lib_page.get_page_number(), '5')
    self.assertEqual(self.lib_page.xblocks[0].name, "Multiple Choice")
    # Deleting the only block on page 5 must fall back to page 4, not page 1.
    self.lib_page.click_delete_button(self.lib_page.xblocks[0].locator)
    self.assertEqual(len(self.lib_page.xblocks), 10)
    self.assertEqual(self.lib_page.get_page_number(), '4')
    self.assertEqual(self.lib_page.xblocks[0].name, '31')
    self.assertEqual(self.lib_page.xblocks[-1].name, '40')
def test_delete_shifts_blocks(self):
    """
    Scenario: Ensure that removing an XBlock shifts other blocks back.
    Given that I have a library in Studio with 40 XBlocks
    Then 10 are displayed
    And I will be on the first page
    When I delete the third XBlock
    There will be 10 displayed
    And the first XBlock will be the first one
    And the last XBlock will be the 11th one
    And I will be on the first page
    """
    self.assertEqual(len(self.lib_page.xblocks), 10)
    self.assertEqual(self.lib_page.get_page_number(), '1')
    # Delete block '3'; block '11' should slide back from page 2 to fill page 1.
    self.lib_page.click_delete_button(self.lib_page.xblocks[2].locator, confirm=True)
    self.assertEqual(len(self.lib_page.xblocks), 10)
    self.assertEqual(self.lib_page.xblocks[0].name, '1')
    self.assertEqual(self.lib_page.xblocks[-1].name, '11')
    self.assertEqual(self.lib_page.get_page_number(), '1')
def test_previews(self):
    """
    Scenario: Ensure the user is able to hide previews of XBlocks.
    Given that I have a library in Studio with 40 XBlocks
    Then previews are visible
    And when I click the toggle previews button
    Then the previews will not be visible
    And when I click the toggle previews button
    Then the previews are visible
    """
    # Previews default to on; the toggle must round-trip off and back on.
    self.assertTrue(self.lib_page.are_previews_showing())
    self.lib_page.toggle_previews()
    self.assertFalse(self.lib_page.are_previews_showing())
    self.lib_page.toggle_previews()
    self.assertTrue(self.lib_page.are_previews_showing())
def test_previews_navigation(self):
    """
    Scenario: Ensure preview settings persist across navigation.
    Given that I have a library in Studio with 40 XBlocks
    Then previews are visible
    And when I click the toggle previews button
    And click the next page button
    Then the previews will not be visible
    And the first XBlock will be the 11th one
    And the last XBlock will be the 20th one
    And when I click the toggle previews button
    And I click the previous page button
    Then the previews will be visible
    And the first XBlock will be the first one
    And the last XBlock will be the 10th one
    """
    self.assertTrue(self.lib_page.are_previews_showing())
    self.lib_page.toggle_previews()
    # Which set of arrows shouldn't matter for this test.
    self.lib_page.move_forward('top')
    # The hidden-previews setting must survive the page change.
    self.assertFalse(self.lib_page.are_previews_showing())
    self.assertEqual(self.lib_page.xblocks[0].name, '11')
    self.assertEqual(self.lib_page.xblocks[-1].name, '20')
    self.lib_page.toggle_previews()
    self.lib_page.move_back('top')
    self.assertTrue(self.lib_page.are_previews_showing())
    self.assertEqual(self.lib_page.xblocks[0].name, '1')
    self.assertEqual(self.lib_page.xblocks[-1].name, '10')
def test_preview_state_persistance(self):
    """
    Scenario: Ensure preview state persists between page loads.
    Given that I have a library in Studio with 40 XBlocks
    Then previews are visible
    And when I click the toggle previews button
    And I revisit the page
    Then the previews will not be visible
    """
    self.assertTrue(self.lib_page.are_previews_showing())
    self.lib_page.toggle_previews()
    # Full page reload: the setting must be stored server- or cookie-side.
    self.lib_page.visit()
    self.lib_page.wait_until_ready()
    self.assertFalse(self.lib_page.are_previews_showing())
def test_preview_add_xblock(self):
    """
    Scenario: Ensure previews are shown when adding new blocks, regardless of preview setting.
    Given that I have a library in Studio with 40 XBlocks
    Then previews are visible
    And when I click the toggle previews button
    Then the previews will not be visible
    And when I add an XBlock
    Then I will be on the 5th page
    And the XBlock will have loaded a preview
    And when I revisit the library
    And I go to the 5th page
    Then the top XBlock will be the one I added
    And it will not have a preview
    And when I add an XBlock
    Then the XBlock I added will have a preview
    And the top XBlock will not have one.
    """
    self.assertTrue(self.lib_page.are_previews_showing())
    self.lib_page.toggle_previews()
    self.assertFalse(self.lib_page.are_previews_showing())
    # A freshly added block always renders a preview, even with previews off.
    add_component(self.lib_page, "problem", "Checkboxes")
    self.assertEqual(self.lib_page.get_page_number(), '5')
    first_added = self.lib_page.xblocks[0]
    self.assertIn("Checkboxes", first_added.name)
    self.assertFalse(self.lib_page.xblocks[0].is_placeholder())
    # After a reload the same block falls back to a placeholder (previews off).
    self.lib_page.visit()
    self.lib_page.wait_until_ready()
    self.lib_page.go_to_page(5)
    self.assertTrue(self.lib_page.xblocks[0].is_placeholder())
    add_component(self.lib_page, "problem", "Multiple Choice")
    # DOM has detatched the element since last assignment
    first_added = self.lib_page.xblocks[0]
    second_added = self.lib_page.xblocks[1]
    self.assertIn("Multiple Choice", second_added.name)
    # Only the block added in this interaction previews; the older stays hidden.
    self.assertFalse(second_added.is_placeholder())
    self.assertTrue(first_added.is_placeholder())
def test_edit_with_preview(self):
    """
    Scenario: Editing an XBlock should show me a preview even if previews are hidden.
    Given that I have a library in Studio with 40 XBlocks
    Then previews are visible
    And when I click the toggle previews button
    Then the previews will not be visible
    And when I edit the first XBlock
    Then the first XBlock will show a preview
    And the other XBlocks will still be placeholders
    """
    self.assertTrue(self.lib_page.are_previews_showing())
    self.lib_page.toggle_previews()
    self.assertFalse(self.lib_page.are_previews_showing())
    target = self.lib_page.xblocks[0]
    # Open and immediately save the editor; the edited block re-renders.
    target.edit()
    target.save_settings()
    self.assertFalse(target.is_placeholder())
    # Editing one block must not wake the previews of its siblings.
    self.assertTrue(all([xblock.is_placeholder() for xblock in self.lib_page.xblocks[1:]]))
def test_duplicate_xblock_pagination(self):
    """
    Scenario: Duplicating an XBlock should not shift the page if the XBlock is not at the end.
    Given that I have a library in Studio with 40 XBlocks
    When I duplicate the third XBlock
    Then the page should not change
    And the duplicate XBlock should be there
    And it should show a preview
    And there should not be more than 10 XBlocks visible.
    """
    third_block_id = self.lib_page.xblocks[2].locator
    self.lib_page.click_duplicate_button(third_block_id)
    self.lib_page.wait_until_ready()
    # The copy is inserted right after its source, at index 3.
    target = self.lib_page.xblocks[3]
    self.assertIn('Duplicate', target.name)
    self.assertFalse(target.is_placeholder())
    # Page size must still cap the visible blocks at 10.
    self.assertEqual(len(self.lib_page.xblocks), 10)
def test_duplicate_xblock_pagination_end(self):
    """
    Scenario: Duplicating an XBlock if it's the last one should bring me to the next page with a preview.
    Given that I have a library in Studio with 40 XBlocks
    And when I hide previews
    And I duplicate the last XBlock
    The page should change to page 2
    And the duplicate XBlock should be the first XBlock
    And it should not be a placeholder
    """
    self.lib_page.toggle_previews()
    last_block_id = self.lib_page.xblocks[-1].locator
    self.lib_page.click_duplicate_button(last_block_id)
    self.lib_page.wait_until_ready()
    # The copy overflows the current page, so the view follows it to page 2.
    self.assertEqual(self.lib_page.get_page_number(), '2')
    target_block = self.lib_page.xblocks[0]
    self.assertIn('Duplicate', target_block.name)
    # New blocks preview even though previews were toggled off above.
    self.assertFalse(target_block.is_placeholder())
class LibraryUsersPageTest(StudioLibraryTest):
"""
Test the functionality of the library "Instructor Access" page.
"""
def setUp(self):
    """Create a second user and open the library's Instructor Access page."""
    super(LibraryUsersPageTest, self).setUp()
    # Create a second user for use in these tests:
    # no_login=True keeps the current session logged in as the library admin.
    AutoAuthPage(self.browser, username="second", email="second@example.com", no_login=True).visit()
    self.page = LibraryUsersPage(self.browser, self.library_key)
    self.page.visit()
def _refresh_page(self):
    """
    Reload the page.

    A fresh LibraryUsersPage is constructed because role changes detach
    the previously cached DOM elements.
    """
    self.page = LibraryUsersPage(self.browser, self.library_key)
    self.page.visit()
def test_user_management(self):
    """
    Scenario: Ensure that we can edit the permissions of users.
    Given I have a library in Studio where I am the only admin
    assigned (which is the default for a newly-created library)
    And I navigate to Library "Instructor Access" Page in Studio
    Then there should be one user listed (myself), and I must
    not be able to remove myself or my instructor privilege.
    When I click Add Instructor
    Then I see a form to complete
    When I complete the form and submit it
    Then I can see the new user is listed as a "User" of the library
    When I click to Add Staff permissions to the new user
    Then I can see the new user has staff permissions and that I am now
    able to promote them to an Admin or remove their staff permissions.
    When I click to Add Admin permissions to the new user
    Then I can see the new user has admin permissions and that I can now
    remove Admin permissions from either user.
    """
    def check_is_only_admin(user):
        """
        Ensure user is an admin user and cannot be removed.
        (There must always be at least one admin user.)
        """
        self.assertIn("admin", user.role_label.lower())
        self.assertFalse(user.can_promote)
        self.assertFalse(user.can_demote)
        self.assertFalse(user.can_delete)
        self.assertTrue(user.has_no_change_warning)
        self.assertIn("Promote another member to Admin to remove your admin rights", user.no_change_warning_text)

    self.assertEqual(len(self.page.users), 1)
    user = self.page.users[0]
    self.assertTrue(user.is_current_user)
    check_is_only_admin(user)

    # Add a new user:
    self.assertTrue(self.page.has_add_button)
    self.assertFalse(self.page.new_user_form_visible)
    self.page.click_add_button()
    self.assertTrue(self.page.new_user_form_visible)
    self.page.set_new_user_email('second@example.com')
    self.page.click_submit_new_user_form()

    # Check the new user's listing:
    def get_two_users():
        """
        Expect two users to be listed, one being me, and another user.
        Returns me, them
        """
        users = self.page.users
        self.assertEqual(len(users), 2)
        self.assertEqual(len([u for u in users if u.is_current_user]), 1)
        # Listing order is not guaranteed; normalize to (me, them).
        if users[0].is_current_user:
            return users[0], users[1]
        else:
            return users[1], users[0]

    self._refresh_page()
    user_me, them = get_two_users()
    check_is_only_admin(user_me)
    # Newly added user starts with the plain "User" role.
    self.assertIn("user", them.role_label.lower())
    self.assertTrue(them.can_promote)
    self.assertIn("Add Staff Access", them.promote_button_text)
    self.assertFalse(them.can_demote)
    self.assertTrue(them.can_delete)
    self.assertFalse(them.has_no_change_warning)

    # Add Staff permissions to the new user:
    them.click_promote()
    self._refresh_page()
    user_me, them = get_two_users()
    check_is_only_admin(user_me)
    self.assertIn("staff", them.role_label.lower())
    self.assertTrue(them.can_promote)
    self.assertIn("Add Admin Access", them.promote_button_text)
    self.assertTrue(them.can_demote)
    self.assertIn("Remove Staff Access", them.demote_button_text)
    self.assertTrue(them.can_delete)
    self.assertFalse(them.has_no_change_warning)

    # Add Admin permissions to the new user:
    them.click_promote()
    self._refresh_page()
    user_me, them = get_two_users()
    # With two admins, I can now be demoted or deleted myself.
    self.assertIn("admin", user_me.role_label.lower())
    self.assertFalse(user_me.can_promote)
    self.assertTrue(user_me.can_demote)
    self.assertTrue(user_me.can_delete)
    self.assertFalse(user_me.has_no_change_warning)
    self.assertIn("admin", them.role_label.lower())
    self.assertFalse(them.can_promote)
    self.assertTrue(them.can_demote)
    self.assertIn("Remove Admin Access", them.demote_button_text)
    self.assertTrue(them.can_delete)
    self.assertFalse(them.has_no_change_warning)

    # Delete the new user:
    them.click_delete()
    self._refresh_page()
    # Back to a single (current) user.
    self.assertEqual(len(self.page.users), 1)
    user = self.page.users[0]
    self.assertTrue(user.is_current_user)
@attr('a11y')
class StudioLibraryA11yTest(StudioLibraryTest):
    """
    Class to test Studio pages accessibility.
    """

    def test_lib_edit_page_a11y(self):
        """
        Check accessibility of LibraryEditPage.

        Runs the automated a11y audit against the fully rendered library
        edit page, with known pre-existing violations excluded.
        """
        lib_page = LibraryEditPage(self.browser, self.library_key)
        lib_page.visit()
        lib_page.wait_until_ready()
        # There are several existing color contrast errors on this page,
        # we will ignore this error in the test until we fix them.
        lib_page.a11y_audit.config.set_rules({
            "ignore": [
                'color-contrast',  # TODO: AC-225
                'link-href',  # TODO: AC-226
                'nav-aria-label',  # TODO: AC-227
                'icon-aria-hidden',  # TODO: AC-229
            ],
        })
        lib_page.a11y_audit.check_for_accessibility_errors()
| antoviaque/edx-platform | common/test/acceptance/tests/studio/test_studio_library.py | Python | agpl-3.0 | 28,054 | [
"VisIt"
] | 1cf182ed44307367356324fb84719ddacc0314335c8ecbd0a7a8ced8343b1bfa |
########################################################################
# File: ReplicateAndRegister.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/03/13 18:49:12
########################################################################
""" :mod: ReplicateAndRegister
==========================
.. module: ReplicateAndRegister
:synopsis: ReplicateAndRegister operation handler
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
ReplicateAndRegister operation handler
"""
__RCSID__ = "$Id$"
# #
# @file ReplicateAndRegister.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/03/13 18:49:28
# @brief Definition of ReplicateAndRegister class.
# # imports
import re
from collections import defaultdict
# # from DIRAC
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.Adler import compareAdler, hexAdlerToInt, intAdlerToHex
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.DataManagementSystem.Agent.RequestOperations.DMSRequestOperationsBase import DMSRequestOperationsBase
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.DataManagementSystem.Client.FTS3Operation import FTS3TransferOperation
from DIRAC.DataManagementSystem.Client.FTS3File import FTS3File
from DIRAC.DataManagementSystem.Client.FTS3Client import FTS3Client
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.DataManagementSystem.Client.FTSClient import FTSClient
def filterReplicas(opFile, logger=None, dataManager=None):
    """ filter out banned/invalid source SEs

    Classify the replicas of a request File into source candidates.

    :param opFile: RMS File object; its Checksum/ChecksumType/Status/Error
        attributes may be updated in place as a side effect
    :param logger: gLogger-like object (defaults to gLogger)
    :param dataManager: DataManager instance (a new one is created if None)
    :returns: S_OK(dict) mapping category -> list of SE names, with keys
        'Valid', 'Bad', 'NoMetadata', 'NoReplicas', 'NoActiveReplicas';
        S_ERROR if the file is missing from the catalog or lookups fail
    """
    if logger is None:
        logger = gLogger
    if dataManager is None:
        dataManager = DataManager()

    log = logger.getSubLogger("filterReplicas")
    result = defaultdict(list)

    replicas = dataManager.getActiveReplicas(opFile.LFN, getUrl=False)
    if not replicas["OK"]:
        log.error('Failed to get active replicas', replicas["Message"])
        return replicas
    reNotExists = re.compile(r".*such file.*")
    replicas = replicas["Value"]
    failed = replicas["Failed"].get(opFile.LFN, "")
    # A "no such file" failure means the LFN is not registered at all:
    # mark the request file itself as Failed.
    if reNotExists.match(failed.lower()):
        opFile.Status = "Failed"
        opFile.Error = failed
        return S_ERROR(failed)

    replicas = replicas["Successful"].get(opFile.LFN, {})
    noReplicas = False
    if not replicas:
        # No *active* replica: distinguish "no replica at all" from
        # "replicas exist but their SEs are not active".
        allReplicas = dataManager.getReplicas(opFile.LFN, getUrl=False)
        if allReplicas['OK']:
            allReplicas = allReplicas['Value']['Successful'].get(opFile.LFN, {})
            if not allReplicas:
                result['NoReplicas'].append(None)
                noReplicas = True
            else:
                # There are replicas but we cannot get metadata because the replica is not active
                result['NoActiveReplicas'] += list(allReplicas)
            log.verbose("File has no%s replica in File Catalog" % ('' if noReplicas else ' active'), opFile.LFN)
        else:
            return allReplicas

    if not opFile.Checksum or hexAdlerToInt(opFile.Checksum) is False:
        # Set Checksum to FC checksum if not set in the request
        fcMetadata = FileCatalog().getFileMetadata(opFile.LFN)
        fcChecksum = fcMetadata.get(
            'Value',
            {}).get(
            'Successful',
            {}).get(
            opFile.LFN,
            {}).get('Checksum')
        # Replace opFile.Checksum if it doesn't match a valid FC checksum
        if fcChecksum:
            if hexAdlerToInt(fcChecksum) is not False:
                opFile.Checksum = fcChecksum
                opFile.ChecksumType = fcMetadata['Value']['Successful'][opFile.LFN].get('ChecksumType', 'Adler32')
            else:
                opFile.Checksum = None

    # If no replica was found, return what we collected as information
    if not replicas:
        return S_OK(result)

    for repSEName in replicas:
        repSEMetadata = StorageElement(repSEName).getFileMetadata(opFile.LFN)
        error = repSEMetadata.get('Message', repSEMetadata.get('Value', {}).get('Failed', {}).get(opFile.LFN))
        if error:
            log.warn('unable to get metadata at %s for %s' % (repSEName, opFile.LFN), error.replace('\n', ''))
            if 'File does not exist' in error:
                result['NoReplicas'].append(repSEName)
            else:
                result["NoMetadata"].append(repSEName)
        elif not noReplicas:
            repSEMetadata = repSEMetadata['Value']['Successful'][opFile.LFN]

            seChecksum = hexAdlerToInt(repSEMetadata.get("Checksum"))
            # As from here seChecksum is an integer or False, not a hex string!
            if seChecksum is False and opFile.Checksum:
                result['NoMetadata'].append(repSEName)
            elif not seChecksum and opFile.Checksum:
                opFile.Checksum = None
                opFile.ChecksumType = None
            elif seChecksum and (not opFile.Checksum or opFile.Checksum == 'False'):
                # Use the SE checksum (convert to hex) and force type to be Adler32
                opFile.Checksum = intAdlerToHex(seChecksum)
                opFile.ChecksumType = 'Adler32'
            if not opFile.Checksum or not seChecksum or compareAdler(
                    intAdlerToHex(seChecksum), opFile.Checksum):
                # # All checksums are OK
                result["Valid"].append(repSEName)
            else:
                log.warn(" %s checksum mismatch, FC: '%s' @%s: '%s'" %
                         (opFile.LFN, opFile.Checksum, repSEName, intAdlerToHex(seChecksum)))
                result["Bad"].append(repSEName)
        else:
            # If a replica was found somewhere, don't set the file as no replicas
            # NOTE(review): this branch looks unreachable — noReplicas can only be
            # True when `replicas` is empty, in which case we returned above; confirm.
            result['NoReplicas'] = []

    return S_OK(result)
########################################################################
class ReplicateAndRegister(DMSRequestOperationsBase):
"""
.. class:: ReplicateAndRegister
ReplicateAndRegister operation handler
"""
def __init__(self, operation=None, csPath=None):
    """c'tor

    Registers the gMonitor activities used by both the DataManager and
    FTS transfer paths, and creates the FileCatalog client.

    :param self: self reference
    :param Operation operation: Operation instance
    :param str csPath: CS path for this handler
    """
    super(ReplicateAndRegister, self).__init__(operation, csPath)
    # # own gMonitor stuff for files
    gMonitor.registerActivity("ReplicateAndRegisterAtt", "Replicate and register attempted",
                              "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
    gMonitor.registerActivity("ReplicateOK", "Replications successful",
                              "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
    gMonitor.registerActivity("ReplicateFail", "Replications failed",
                              "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
    gMonitor.registerActivity("RegisterOK", "Registrations successful",
                              "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
    gMonitor.registerActivity("RegisterFail", "Registrations failed",
                              "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
    # # for FTS
    gMonitor.registerActivity("FTSScheduleAtt", "Files schedule attempted",
                              "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
    gMonitor.registerActivity("FTSScheduleOK", "File schedule successful",
                              "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
    gMonitor.registerActivity("FTSScheduleFail", "File schedule failed",
                              "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
    # # SE cache
    # Clients
    self.fc = FileCatalog()
def __call__(self):
    """ call me maybe

    Entry point of the operation handler.  First reconciles the state of
    already-replicated files, then dispatches to FTS scheduling (new FTS3
    or legacy) or to a direct DataManager transfer, depending on the
    FTSMode / UseNewFTS3 options and the owner's group.

    :returns: S_OK / S_ERROR from the chosen transfer method
    """
    # # check replicas first
    checkReplicas = self.__checkReplicas()
    if not checkReplicas["OK"]:
        self.log.error('Failed to check replicas', checkReplicas["Message"])
    # Idiom fix: getattr with a default replaces the hasattr/getattr pairs;
    # truthiness is identical (missing or falsy attribute -> False path).
    if getattr(self, "FTSMode", False):
        bannedGroups = getattr(self, "FTSBannedGroups", ())
        if self.request.OwnerGroup in bannedGroups:
            self.log.verbose("usage of FTS system is banned for request's owner")
            return self.dmTransfer()

        if getattr(self, 'UseNewFTS3', False):
            return self.fts3Transfer()
        else:
            return self.ftsTransfer()

    return self.dmTransfer()
def __checkReplicas(self):
    """ check done replicas and update file states

    For each Waiting/Scheduled file, mark it Done if all target SEs
    already hold a replica, or Failed if the catalog reports the file
    missing.

    :returns: S_OK() on success, S_ERROR from the catalog lookup otherwise
    """
    waitingFiles = dict([(opFile.LFN, opFile) for opFile in self.operation
                         if opFile.Status in ("Waiting", "Scheduled")])
    targetSESet = set(self.operation.targetSEList)

    replicas = self.fc.getReplicas(waitingFiles.keys())
    if not replicas["OK"]:
        self.log.error('Failed to get replicas', replicas["Message"])
        return replicas

    reMissing = re.compile(r".*such file.*")
    for failedLFN, errStr in replicas["Value"]["Failed"].iteritems():
        waitingFiles[failedLFN].Error = errStr
        # "no such file" means nothing left to replicate: fail the file
        # and count one failed replication per target SE.
        if reMissing.search(errStr.lower()):
            self.log.error("File does not exists", failedLFN)
            gMonitor.addMark("ReplicateFail", len(targetSESet))
            waitingFiles[failedLFN].Status = "Failed"

    for successfulLFN, reps in replicas["Value"]["Successful"].iteritems():
        if targetSESet.issubset(set(reps)):
            self.log.info("file %s has been replicated to all targets" % successfulLFN)
            waitingFiles[successfulLFN].Status = "Done"

    return S_OK()
def _addMetadataToFiles(self, toSchedule):
    """ Add metadata to those files that need to be scheduled through FTS

    :param dict toSchedule: maps LFN -> [opFile, validReplicas, validTargets]
        (as built in ftsTransfer); only the opFile at index 0 is used here.
    :returns: S_OK({lfn: opFile}) with GUID, Checksum/ChecksumType and Size
        filled in from the File Catalog; S_ERROR if the bulk lookup fails
    """
    if toSchedule:
        self.log.info("found %s files to schedule, getting metadata from FC" % len(toSchedule))
    else:
        self.log.verbose("No files to schedule")
        return S_OK([])

    res = self.fc.getFileMetadata(toSchedule.keys())
    if not res['OK']:
        return res
    else:
        if res['Value']['Failed']:
            # Files without FC metadata are silently dropped from scheduling here.
            self.log.warn("Can't schedule %d files: problems getting the metadata: %s" %
                          (len(res['Value']['Failed']), ', '.join(res['Value']['Failed'])))
        metadata = res['Value']['Successful']

    filesToSchedule = {}

    for lfn, lfnMetadata in metadata.iteritems():
        opFileToSchedule = toSchedule[lfn][0]
        opFileToSchedule.GUID = lfnMetadata['GUID']
        # In principle this is defined already in filterReplicas()
        if not opFileToSchedule.Checksum:
            opFileToSchedule.Checksum = metadata[lfn]['Checksum']
            opFileToSchedule.ChecksumType = metadata[lfn]['ChecksumType']
        opFileToSchedule.Size = metadata[lfn]['Size']

        filesToSchedule[opFileToSchedule.LFN] = opFileToSchedule

    return S_OK(filesToSchedule)
def _filterReplicas(self, opFile):
    """ filter out banned/invalid source SEs

    Thin wrapper delegating to the module-level :func:`filterReplicas`,
    injecting this handler's logger and DataManager.
    """
    return filterReplicas(opFile, logger=self.log, dataManager=self.dm)
def ftsTransfer(self):
    """ replicate and register using FTS

    Classifies each waiting file's replicas, collects FC metadata for the
    schedulable ones and submits them to the legacy FTS scheduler.  Files
    that could not be scheduled fall through to a DataManager transfer.

    :returns: S_OK / S_ERROR
    """
    self.log.info("scheduling files in FTS...")
    # Refuse to schedule towards SEs banned for writing.
    bannedTargets = self.checkSEsRSS()
    if not bannedTargets['OK']:
        gMonitor.addMark("FTSScheduleAtt")
        gMonitor.addMark("FTSScheduleFail")
        return bannedTargets

    if bannedTargets['Value']:
        return S_OK("%s targets are banned for writing" % ",".join(bannedTargets['Value']))

    # Can continue now
    self.log.verbose("No targets banned for writing")

    toSchedule = {}
    delayExecution = 0
    errors = defaultdict(int)
    for opFile in self.getWaitingFilesList():
        opFile.Error = ''
        gMonitor.addMark("FTSScheduleAtt")
        # # check replicas
        replicas = self._filterReplicas(opFile)
        if not replicas["OK"]:
            continue
        replicas = replicas["Value"]

        validReplicas = replicas.get("Valid")
        noMetaReplicas = replicas.get("NoMetadata")
        noReplicas = replicas.get('NoReplicas')
        badReplicas = replicas.get('Bad')
        noActiveReplicas = replicas.get('NoActiveReplicas')

        if validReplicas:
            validTargets = list(set(self.operation.targetSEList) - set(validReplicas))
            if not validTargets:
                self.log.info("file %s is already present at all targets" % opFile.LFN)
                opFile.Status = "Done"
            else:
                toSchedule[opFile.LFN] = [opFile, validReplicas, validTargets]
        else:
            gMonitor.addMark("FTSScheduleFail")
            if noMetaReplicas:
                err = "Couldn't get metadata"
                errors[err] += 1
                self.log.verbose(
                    "unable to schedule '%s', %s at %s" %
                    (opFile.LFN, err, ','.join(noMetaReplicas)))
                opFile.Error = err
            elif noReplicas:
                err = "File doesn't exist"
                errors[err] += 1
                self.log.error("Unable to schedule transfer",
                               "%s %s at %s" % (opFile.LFN, err, ','.join(noReplicas)))
                opFile.Error = err
                opFile.Status = 'Failed'
            elif badReplicas:
                err = "All replicas have a bad checksum"
                errors[err] += 1
                self.log.error("Unable to schedule transfer",
                               "%s, %s at %s" % (opFile.LFN, err, ','.join(badReplicas)))
                opFile.Error = err
                opFile.Status = 'Failed'
            elif noActiveReplicas:
                err = "No active replica found"
                errors[err] += 1
                self.log.verbose("Unable to schedule transfer",
                                 "%s, %s at %s" % (opFile.LFN, err, ','.join(noActiveReplicas)))
                opFile.Error = err
                # All source SEs are banned, delay execution by 1 hour
                delayExecution = 60

    if delayExecution:
        self.log.info("Delay execution of the request by %d minutes" % delayExecution)
        self.request.delayNextExecution(delayExecution)
    # Log error counts
    for error, count in errors.iteritems():
        self.log.error(error, 'for %d files' % count)

    filesToScheduleList = []
    res = self._addMetadataToFiles(toSchedule)
    if not res['OK']:
        return res
    else:
        filesToSchedule = res['Value']

        for lfn in filesToSchedule:
            # BUG FIX: _addMetadataToFiles() returns {lfn: opFile}, not a list,
            # so the previous `filesToSchedule[lfn][0]` indexed the File object
            # itself and would raise a TypeError.
            filesToScheduleList.append((filesToSchedule[lfn].toJSON()['Value'],
                                        toSchedule[lfn][1],
                                        toSchedule[lfn][2]))

    if filesToScheduleList:

        ftsSchedule = FTSClient().ftsSchedule(self.request.RequestID,
                                              self.operation.OperationID,
                                              filesToScheduleList)
        if not ftsSchedule["OK"]:
            self.log.error("Completely failed to schedule to FTS:", ftsSchedule["Message"])
            return ftsSchedule

        # might have nothing to schedule
        ftsSchedule = ftsSchedule["Value"]
        if not ftsSchedule:
            return S_OK()

        self.log.info("%d files have been scheduled to FTS" % len(ftsSchedule['Successful']))
        for opFile in self.operation:
            fileID = opFile.FileID
            if fileID in ftsSchedule["Successful"]:
                gMonitor.addMark("FTSScheduleOK", 1)
                opFile.Status = "Scheduled"
                self.log.debug("%s has been scheduled for FTS" % opFile.LFN)
            elif fileID in ftsSchedule["Failed"]:
                gMonitor.addMark("FTSScheduleFail", 1)
                opFile.Error = ftsSchedule["Failed"][fileID]
                if 'sourceSURL equals to targetSURL' in opFile.Error:
                    # In this case there is no need to continue
                    opFile.Status = 'Failed'
                self.log.warn("unable to schedule %s for FTS: %s" % (opFile.LFN, opFile.Error))
    else:
        self.log.info("No files to schedule after metadata checks")

    # Just in case some transfers could not be scheduled, try them with RM
    return self.dmTransfer(fromFTS=True)
def _checkExistingFTS3Operations(self):
    """
    Check if there are ongoing FTS3Operation for the current RMS Operation

    Under some conditions, we can be trying to schedule files while
    there is still an FTS transfer going on. This typically happens
    when the REA hangs. To prevent further race condition, we check
    if there are FTS3Operations in a non Final state matching the
    current operation ID. If so, we put the corresponding files in
    scheduled mode. We will then wait till the FTS3 Operation performs
    the callback

    :returns: S_OK with True if we can go on, False if we should stop the processing
    """
    res = FTS3Client().getOperationsFromRMSOpID(self.operation.OperationID)
    if not res['OK']:
        self.log.debug(
            "Could not get FTS3Operations matching OperationID",
            self.operation.OperationID)
        return res
    existingFTSOperations = res['Value']
    # It is ok to have FTS Operations in a final state, so we
    # care only about the others
    unfinishedFTSOperations = [
        ops for ops in existingFTSOperations if ops.status not in FTS3TransferOperation.FINAL_STATES]
    if not unfinishedFTSOperations:
        self.log.debug("No ongoing FTS3Operations, all good")
        return S_OK(True)
    self.log.warn("Some FTS3Operations already exist for the RMS Operation:",
                  [op.operationID for op in unfinishedFTSOperations])
    # This would really be a screwed up situation !
    if len(unfinishedFTSOperations) > 1:
        self.log.warn("That's a serious problem !!")
    # We take the rmsFileID of the files in the Operations,
    # find the corresponding File object, and set them scheduled
    rmsFileIDsToSetScheduled = set(
        [ftsFile.rmsFileID for ftsOp in unfinishedFTSOperations for ftsFile in ftsOp.ftsFiles])
    for opFile in self.operation:
        # If it is in the DB, it has a FileID
        opFileID = opFile.FileID
        if opFileID in rmsFileIDsToSetScheduled:
            self.log.warn("Setting RMSFile as already scheduled", opFileID)
            opFile.Status = "Scheduled"
    # We return here such that the Request is set back to Scheduled in the DB
    # With no further modification
    return S_OK(False)
def fts3Transfer(self):
    """ replicate and register using FTS3

    Schedules the operation's waiting files with FTS3, marks them
    "Scheduled" on success, and falls back to the DataManager for
    anything that could not be scheduled.

    :returns: S_OK / S_ERROR
    """
    self.log.info("scheduling files in FTS3...")

    # Check first if we do not have ongoing transfers
    res = self._checkExistingFTS3Operations()
    if not res['OK']:
        return res

    # if res['Value'] is False
    # it means that there are ongoing transfers
    # and we should stop here
    if res['Value'] is False:
        # return S_OK such that the request is put back
        return S_OK()

    fts3Files = []
    toSchedule = {}

    # Dict which maps the FileID to the object
    rmsFilesIds = {}

    for opFile in self.getWaitingFilesList():
        rmsFilesIds[opFile.FileID] = opFile

        opFile.Error = ''
        gMonitor.addMark("FTSScheduleAtt")
        # # check replicas
        replicas = self._filterReplicas(opFile)
        if not replicas["OK"]:
            continue
        replicas = replicas["Value"]

        validReplicas = replicas["Valid"]
        noMetaReplicas = replicas["NoMetadata"]
        noReplicas = replicas['NoReplicas']
        badReplicas = replicas['Bad']
        noPFN = replicas['NoPFN']

        if validReplicas:
            # Only schedule towards targets that do not already hold a replica
            validTargets = list(set(self.operation.targetSEList) - set(validReplicas))
            if not validTargets:
                self.log.info("file %s is already present at all targets" % opFile.LFN)
                opFile.Status = "Done"
            else:
                toSchedule[opFile.LFN] = [opFile, validTargets]
        else:
            gMonitor.addMark("FTSScheduleFail")
            if noMetaReplicas:
                self.log.warn("unable to schedule '%s', couldn't get metadata at %s" % (opFile.LFN, ','.join(noMetaReplicas)))
                opFile.Error = "Couldn't get metadata"
            elif noReplicas:
                self.log.error(
                    "Unable to schedule transfer", "File %s doesn't exist at %s" %
                    (opFile.LFN, ','.join(noReplicas)))
                opFile.Error = 'No replicas found'
                opFile.Status = 'Failed'
            elif badReplicas:
                self.log.error(
                    "Unable to schedule transfer",
                    "File %s, all replicas have a bad checksum at %s" %
                    (opFile.LFN,
                     ','.join(badReplicas)))
                opFile.Error = 'All replicas have a bad checksum'
                opFile.Status = 'Failed'
            elif noPFN:
                # NOTE(review): unlike the other branches, no Error/Status is
                # set here, so the file stays in Waiting -- confirm intended
                self.log.warn(
                    "unable to schedule %s, could not get a PFN at %s" %
                    (opFile.LFN, ','.join(noPFN)))

    res = self._addMetadataToFiles(toSchedule)
    if not res['OK']:
        return res
    else:
        filesToSchedule = res['Value']

        # Build one FTS3File per (file, missing target) pair
        for lfn in filesToSchedule:
            opFile = filesToSchedule[lfn]
            validTargets = toSchedule[lfn][1]
            for targetSE in validTargets:
                ftsFile = FTS3File.fromRMSFile(opFile, targetSE)
                fts3Files.append(ftsFile)

    if fts3Files:
        res = Registry.getUsernameForDN(self.request.OwnerDN)
        if not res['OK']:
            self.log.error(
                "Cannot get username for DN", "%s %s" %
                (self.request.OwnerDN, res['Message']))
            return res

        username = res['Value']
        fts3Operation = FTS3TransferOperation.fromRMSObjects(self.request, self.operation, username)
        fts3Operation.ftsFiles = fts3Files

        ftsSchedule = FTS3Client().persistOperation(fts3Operation)
        if not ftsSchedule["OK"]:
            self.log.error("Completely failed to schedule to FTS3:", ftsSchedule["Message"])
            return ftsSchedule

        # might have nothing to schedule
        ftsSchedule = ftsSchedule["Value"]

        self.log.info("Scheduled with FTS3Operation id %s" % ftsSchedule)

        self.log.info("%d files have been scheduled to FTS3" % len(fts3Files))

        for ftsFile in fts3Files:
            opFile = rmsFilesIds[ftsFile.rmsFileID]
            gMonitor.addMark("FTSScheduleOK", 1)
            opFile.Status = "Scheduled"
            self.log.debug("%s has been scheduled for FTS" % opFile.LFN)
    else:
        self.log.info("No files to schedule after metadata checks")

    # Just in case some transfers could not be scheduled, try them with RM
    return self.dmTransfer(fromFTS=True)
def dmTransfer(self, fromFTS=False):
    """ replicate and register using dataManager

    :param bool fromFTS: True when called as a fallback after FTS
        scheduling; only changes the logged message
    :returns: S_OK / S_ERROR
    """
    # # get waiting files. If none just return
    # # source SE
    sourceSE = self.operation.SourceSE if self.operation.SourceSE else None
    if sourceSE:
        # # check source se for read
        bannedSource = self.checkSEsRSS(sourceSE, 'ReadAccess')
        if not bannedSource["OK"]:
            gMonitor.addMark("ReplicateAndRegisterAtt", len(self.operation))
            gMonitor.addMark("ReplicateFail", len(self.operation))
            return bannedSource

        if bannedSource["Value"]:
            self.operation.Error = "SourceSE %s is banned for reading" % sourceSE
            self.log.info(self.operation.Error)
            return S_OK(self.operation.Error)

    # # check targetSEs for write
    bannedTargets = self.checkSEsRSS()
    if not bannedTargets['OK']:
        gMonitor.addMark("ReplicateAndRegisterAtt", len(self.operation))
        gMonitor.addMark("ReplicateFail", len(self.operation))
        return bannedTargets

    if bannedTargets['Value']:
        self.operation.Error = "%s targets are banned for writing" % ",".join(bannedTargets['Value'])
        return S_OK(self.operation.Error)

    # Can continue now
    self.log.verbose("No targets banned for writing")

    waitingFiles = self.getWaitingFilesList()
    if not waitingFiles:
        return S_OK()
    # # loop over files
    if fromFTS:
        self.log.info("Trying transfer using replica manager as FTS failed")
    else:
        self.log.info("Transferring files using Data manager...")
    # error message -> number of files affected, reported at the end
    errors = defaultdict(int)
    delayExecution = 0

    for opFile in waitingFiles:
        if opFile.Error in ("Couldn't get metadata",
                            "File doesn't exist",
                            'No active replica found',
                            "All replicas have a bad checksum",):
            err = "File already in error status"
            errors[err] += 1
            # NOTE(review): there is no `continue` here, so files already in
            # error status are still re-attempted below -- confirm intended

        gMonitor.addMark("ReplicateAndRegisterAtt", 1)
        opFile.Error = ''
        lfn = opFile.LFN

        # Check if replica is at the specified source
        replicas = self._filterReplicas(opFile)
        if not replicas["OK"]:
            self.log.error('Failed to check replicas', replicas["Message"])
            continue
        replicas = replicas["Value"]
        validReplicas = replicas.get("Valid")
        noMetaReplicas = replicas.get("NoMetadata")
        noReplicas = replicas.get('NoReplicas')
        badReplicas = replicas.get('Bad')
        noActiveReplicas = replicas.get('NoActiveReplicas')

        if not validReplicas:
            gMonitor.addMark("ReplicateFail")
            if noMetaReplicas:
                err = "Couldn't get metadata"
                errors[err] += 1
                self.log.verbose(
                    "unable to replicate '%s', couldn't get metadata at %s" %
                    (opFile.LFN, ','.join(noMetaReplicas)))
                opFile.Error = err
            elif noReplicas:
                err = "File doesn't exist"
                errors[err] += 1
                self.log.verbose(
                    "Unable to replicate", "File %s doesn't exist at %s" %
                    (opFile.LFN, ','.join(noReplicas)))
                opFile.Error = err
                opFile.Status = 'Failed'
            elif badReplicas:
                err = "All replicas have a bad checksum"
                errors[err] += 1
                self.log.error(
                    "Unable to replicate", "%s, all replicas have a bad checksum at %s" %
                    (opFile.LFN, ','.join(badReplicas)))
                opFile.Error = err
                opFile.Status = 'Failed'
            elif noActiveReplicas:
                err = "No active replica found"
                errors[err] += 1
                self.log.verbose("Unable to schedule transfer",
                                 "%s, %s at %s" % (opFile.LFN, err, ','.join(noActiveReplicas)))
                opFile.Error = err
                # All source SEs are banned, delay execution by 1 hour
                delayExecution = 60
            continue
        # # get the first one in the list
        if sourceSE not in validReplicas:
            if sourceSE:
                err = "File not at specified source"
                errors[err] += 1
                self.log.warn(
                    "%s is not at specified sourceSE %s, changed to %s" %
                    (lfn, sourceSE, validReplicas[0]))
            # NOTE(review): `sourceSE` is reassigned here and the new value
            # carries over to the remaining files of the loop -- confirm
            sourceSE = validReplicas[0]

        # # loop over targetSE
        catalogs = self.operation.Catalog
        if catalogs:
            catalogs = [cat.strip() for cat in catalogs.split(',')]

        for targetSE in self.operation.targetSEList:
            # # call DataManager
            if targetSE in validReplicas:
                self.log.warn("Request to replicate %s to an existing location: %s" % (lfn, targetSE))
                opFile.Status = 'Done'
                continue
            res = self.dm.replicateAndRegister(lfn, targetSE, sourceSE=sourceSE, catalog=catalogs)
            if res["OK"]:
                if lfn in res["Value"]["Successful"]:
                    if "replicate" in res["Value"]["Successful"][lfn]:
                        repTime = res["Value"]["Successful"][lfn]["replicate"]
                        prString = "file %s replicated at %s in %s s." % (lfn, targetSE, repTime)

                        gMonitor.addMark("ReplicateOK", 1)

                        if "register" in res["Value"]["Successful"][lfn]:
                            gMonitor.addMark("RegisterOK", 1)
                            regTime = res["Value"]["Successful"][lfn]["register"]
                            prString += ' and registered in %s s.' % regTime
                            self.log.info(prString)
                        else:
                            gMonitor.addMark("RegisterFail", 1)
                            prString += " but failed to register"
                            self.log.warn(prString)

                            opFile.Error = "Failed to register"
                            # # add register replica operation
                            registerOperation = self.getRegisterOperation(
                                opFile, targetSE, type='RegisterReplica')
                            self.request.insertAfter(registerOperation, self.operation)
                    else:
                        self.log.error("Failed to replicate", "%s to %s" % (lfn, targetSE))
                        gMonitor.addMark("ReplicateFail", 1)
                        opFile.Error = "Failed to replicate"
                else:
                    gMonitor.addMark("ReplicateFail", 1)
                    reason = res["Value"]["Failed"][lfn]
                    self.log.error(
                        "Failed to replicate and register", "File %s at %s:" %
                        (lfn, targetSE), reason)
                    opFile.Error = reason
            else:
                gMonitor.addMark("ReplicateFail", 1)
                opFile.Error = "DataManager error: %s" % res["Message"]
                self.log.error("DataManager error", res["Message"])

        if not opFile.Error:
            if len(self.operation.targetSEList) > 1:
                self.log.info("file %s has been replicated to all targetSEs" % lfn)
            opFile.Status = "Done"

    # Log error counts
    if delayExecution:
        self.log.info("Delay execution of the request by %d minutes" % delayExecution)
        self.request.delayNextExecution(delayExecution)
    for error, count in errors.iteritems():
        self.log.error(error, 'for %d files' % count)

    return S_OK()
| arrabito/DIRAC | DataManagementSystem/Agent/RequestOperations/ReplicateAndRegister.py | Python | gpl-3.0 | 28,823 | [
"DIRAC"
] | ca0c2c23401940cedcd1ec00065d2d75e6ffa3c68c7e91ed41781527eefe58e9 |
import cgen as c
from sympy import Not
from devito.arch import AMDGPUX, NVIDIAX
from devito.ir import (Block, Call, Conditional, List, Prodder, ParallelIteration,
ParallelBlock, PointerCast, While, FindNodes, Transformer)
from devito.mpi.routines import IrecvCall, IsendCall
from devito.passes.iet.definitions import DataManager, DeviceAwareDataManager
from devito.passes.iet.engine import iet_pass
from devito.passes.iet.orchestration import Orchestrator
from devito.passes.iet.parpragma import (PragmaSimdTransformer, PragmaShmTransformer,
PragmaDeviceAwareTransformer, PragmaLangBB)
from devito.passes.iet.languages.C import CBB
from devito.passes.iet.languages.utils import make_clause_reduction
from devito.symbolics import CondEq, DefFunction
__all__ = ['SimdOmpizer', 'Ompizer', 'OmpIteration', 'OmpRegion',
'DeviceOmpizer', 'DeviceOmpIteration', 'DeviceOmpDataManager',
'OmpDataManager', 'OmpOrchestrator']
class OmpRegion(ParallelBlock):

    """An OpenMP parallel region (``#pragma omp parallel``)."""

    @classmethod
    def _make_header(cls, nthreads, private=None):
        """Build the region-opening pragma, with an optional private() clause."""
        if private:
            private_clause = 'private(%s)' % ','.join(private)
        else:
            private_clause = ''
        return c.Pragma('omp parallel num_threads(%s) %s'
                        % (nthreads.name, private_clause))
class OmpIteration(ParallelIteration):

    """A host-side OpenMP-annotated Iteration (``omp [parallel] for``)."""

    @classmethod
    def _make_construct(cls, parallel=False, **kwargs):
        # Either a self-contained `parallel for`, or a plain `for` to be
        # nested inside an enclosing parallel region
        return 'omp parallel for' if parallel else 'omp for'

    @classmethod
    def _make_clauses(cls, ncollapse=None, chunk_size=None, nthreads=None,
                      reduction=None, schedule=None, **kwargs):
        """Assemble the pragma clauses (collapse, schedule, num_threads, reduction)."""
        clauses = ['collapse(%d)' % (ncollapse or 1)]
        # `chunk_size is False` explicitly disables the schedule clause
        if chunk_size is not False:
            clauses.append('schedule(%s,%s)' % (schedule or 'dynamic',
                                                chunk_size or 1))
        if nthreads:
            clauses.append('num_threads(%s)' % nthreads)
        if reduction:
            clauses.append(make_clause_reduction(reduction))
        return clauses

    @classmethod
    def _process_kwargs(cls, **kwargs):
        kwargs = super()._process_kwargs(**kwargs)
        # Drop the OpenMP-only arguments, already consumed by the builders above
        for key in ('schedule', 'parallel', 'chunk_size', 'nthreads'):
            kwargs.pop(key, None)
        return kwargs
class DeviceOmpIteration(OmpIteration):

    """A device-offloaded OpenMP Iteration
    (``omp target teams distribute parallel for``)."""

    @classmethod
    def _make_construct(cls, **kwargs):
        return 'omp target teams distribute parallel for'

    @classmethod
    def _make_clauses(cls, **kwargs):
        # Force-disable the schedule clause, meaningless for offloaded loops
        kwargs['chunk_size'] = False
        return super()._make_clauses(**kwargs)

    @classmethod
    def _process_kwargs(cls, **kwargs):
        kwargs = super()._process_kwargs(**kwargs)
        # Device-only argument, not forwarded to the parent Iteration
        kwargs.pop('gpu_fit', None)
        return kwargs
class ThreadedProdder(Conditional, Prodder):

    """A Prodder confined to one thread of a parallel region: thread 0 spins
    until the prodded routine (e.g. halo-exchange progress) reports completion."""

    # Prevent visitors from descending into the synthetic body
    _traversable = []

    def __init__(self, prodder):
        # Atomic-ize any single-thread Prodders in the parallel tree
        condition = CondEq(Ompizer.lang['thread-num'], 0)

        # Prod within a while loop until all communications have completed
        # In other words, the thread delegated to prodding is entrapped for as long
        # as it's required
        prod_until = Not(DefFunction(prodder.name, [i.name for i in prodder.arguments]))
        then_body = List(header=c.Comment('Entrap thread until comms have completed'),
                         body=While(prod_until))

        # Initialize both bases explicitly (cooperative super() is not
        # applicable here: the two bases take different arguments)
        Conditional.__init__(self, condition, then_body)
        Prodder.__init__(self, prodder.name, prodder.arguments, periodic=prodder.periodic)
class OmpBB(PragmaLangBB):

    """OpenMP "language building blocks": maps abstract operations
    (init, data mapping, simd, atomics, ...) to concrete OpenMP pragmas
    and runtime-library calls."""

    mapper = {
        # Misc
        'name': 'OpenMP',
        'header': 'omp.h',
        # Platform mapping
        AMDGPUX: None,
        NVIDIAX: None,
        # Runtime library
        'init': None,
        'thread-num': DefFunction('omp_get_thread_num'),
        'num-devices': lambda args:
            DefFunction('omp_get_num_devices', args),
        'set-device': lambda args:
            Call('omp_set_default_device', args),
        # Pragmas
        'simd-for': c.Pragma('omp simd'),
        'simd-for-aligned': lambda i, j: c.Pragma('omp simd aligned(%s:%d)' % (i, j)),
        'atomic': c.Pragma('omp atomic update'),
        'map-enter-to': lambda i, j:
            c.Pragma('omp target enter data map(to: %s%s)' % (i, j)),
        'map-enter-alloc': lambda i, j:
            c.Pragma('omp target enter data map(alloc: %s%s)' % (i, j)),
        'map-update': lambda i, j:
            c.Pragma('omp target update from(%s%s)' % (i, j)),
        'map-update-host': lambda i, j:
            c.Pragma('omp target update from(%s%s)' % (i, j)),
        'map-update-device': lambda i, j:
            c.Pragma('omp target update to(%s%s)' % (i, j)),
        'map-release': lambda i, j, k:
            c.Pragma('omp target exit data map(release: %s%s)%s'
                     % (i, j, k)),
        'map-exit-delete': lambda i, j, k:
            c.Pragma('omp target exit data map(delete: %s%s)%s'
                     % (i, j, k)),
    }
    # Inherit the plain-C entries (e.g. aligned allocation)
    mapper.update(CBB.mapper)

    Region = OmpRegion
    HostIteration = OmpIteration
    DeviceIteration = DeviceOmpIteration
    Prodder = ThreadedProdder
class DeviceOmpBB(OmpBB):

    # NOTE: Work around clang>=10 issue concerning offloading arrays declared
    # with an `__attribute__(aligned(...))` qualifier
    @staticmethod
    def PointerCast(*args):
        """Build a PointerCast with alignment disabled (clang>=10 workaround).

        A `staticmethod` instead of the previous `lambda` class attribute:
        PEP 8 (E731) discourages lambda assignment, and a plain function
        stored on a class would bind `self` when accessed via an instance.
        Inside this function, `PointerCast` resolves to the module-level
        (imported) name -- class bodies are not an enclosing scope.
        """
        return PointerCast(*args, alignment=False)
class SimdOmpizer(PragmaSimdTransformer):
    """Vectorization pass (`omp simd`) using the OpenMP language blocks."""
    lang = OmpBB
class Ompizer(PragmaShmTransformer):
    """Shared-memory parallelization pass based on OpenMP pragmas."""
    lang = OmpBB
class DeviceOmpizer(PragmaDeviceAwareTransformer):

    """Device-aware parallelization pass (offloading via `omp target`)."""

    lang = DeviceOmpBB

    @iet_pass
    def make_gpudirect(self, iet):
        """Wrap MPI Isend/Irecv calls in an `omp target data use_device_ptr`
        region so that device pointers are handed to MPI (GPU-direct)."""
        mapper = {}
        for node in FindNodes((IsendCall, IrecvCall)).visit(iet):
            # The first argument of the MPI call is the communication buffer
            header = c.Pragma('omp target data use_device_ptr(%s)' %
                              node.arguments[0].name)
            mapper[node] = Block(header=header, body=node)

        iet = Transformer(mapper).visit(iet)

        return iet, {}
class OmpDataManager(DataManager):
    """Host-side data allocation/definition pass for the OpenMP backend."""
    lang = OmpBB
class DeviceOmpDataManager(DeviceAwareDataManager):
    """Device-aware data allocation/mapping pass for the OpenMP backend."""
    lang = DeviceOmpBB
class OmpOrchestrator(Orchestrator):
    """Asynchronous-task orchestration pass for the OpenMP backend."""
    lang = DeviceOmpBB
| opesci/devito | devito/passes/iet/languages/openmp.py | Python | mit | 6,258 | [
"VisIt"
] | f32970fa5960986598bb6a810aa72a5f449068d8444a8c96b5b54303e994bed8 |
"""
Voronoi analysis of atom positions
author Gerd and Rama
part of pycrosocpy
"""
import numpy as np
import sys
# from skimage.feature import peak_local_max
from skimage.feature import blob_log
from sklearn.cluster import KMeans
from scipy.spatial import cKDTree
import scipy.optimize as optimization
import pyTEMlib.probe_tools as probe_tools
import pyTEMlib.file_tools as ft
import sidpy
from tqdm import trange
def find_atoms(image, atom_size=0.1, threshold=-1.):
    """ Find atoms is a simple wrapper for blob_log in skimage.feature

    Parameters
    ----------
    image: sidpy.Dataset
        the image to find atoms
    atom_size: float
        visible size of atom blob diameter in nm gives minimal distance between found blobs
    threshold: float
        threshold for blob finder; (usually between 0.001 and 1.0) for threshold <= 0 we use the RMS contrast

    Returns
    -------
    atoms: numpy array(Nx3)
        atoms positions and radius of blob
    """
    if not isinstance(image, sidpy.Dataset):
        raise TypeError('We need a sidpy.Dataset')
    if image.data_type.name != 'IMAGE':
        raise TypeError('We need sidpy.Dataset of sidpy.Datatype: IMAGE')
    if not isinstance(atom_size, (float, int)):
        raise TypeError('atom_size parameter has to be a number')
    # FIX: accept ints too (e.g. threshold=1 is valid), consistently with
    # the atom_size check above; previously only `float` was accepted
    if not isinstance(threshold, (float, int)):
        raise TypeError('threshold parameter has to be a number')

    # Pixel size along the first image dimension (nm/pixel)
    scale_x = ft.get_slope(image.dim_0)
    # Normalize image to [0, 1] for the blob detector
    im = np.array(image - image.min())
    im = im / im.max()
    if threshold < 0.:
        # Default: use the RMS contrast of the normalized image
        threshold = np.std(im)
    atoms = blob_log(im, max_sigma=atom_size / scale_x, threshold=threshold)

    return atoms
def atoms_clustering(atoms, mid_atoms, number_of_clusters=3, nearest_neighbours=7):
    """ A wrapper for sklearn.cluster kmeans clustering of atoms.

    Atoms are clustered according to the distances to their nearest
    neighbours.

    Parameters
    ----------
    atoms: list or np.array (Nx2)
        list of all atoms
    mid_atoms: list or np.array (Nx2)
        atoms to be evaluated
    number_of_clusters: int
        number of clusters to sort (default=3)
    nearest_neighbours: int
        number of nearest neighbours evaluated

    Returns
    -------
    clusters, distances, indices: numpy arrays
    """
    # get distances: k-d tree query of the nearest_neighbours closest atoms
    nn_tree = cKDTree(np.array(atoms)[:, 0:2])
    distances, indices = nn_tree.query(np.array(mid_atoms)[:, 0:2], nearest_neighbours)

    # Clustering of the distance vectors; random_state=0 makes runs reproducible
    k_means = KMeans(n_clusters=number_of_clusters, random_state=0)  # Fixing the RNG in kmeans
    k_means.fit(distances)
    clusters = k_means.predict(distances)

    return clusters, distances, indices
def gauss_difference(params, area):
    """
    Difference between part of an image and a Gaussian

    This function is used in the atom refine function of pyTEMlib,
    as the residual function handed to scipy.optimize.leastsq

    Parameters
    ----------
    params: list
        list of Gaussian parameters [width, position_x, position_y, intensity]
    area: numpy array
        2D matrix = part of an image

    Returns
    -------
    numpy array: flattened array of difference
    """
    gauss = probe_tools.make_gauss(area.shape[0], area.shape[1], width=params[0], x0=params[1], y0=params[2],
                                   intensity=params[3])
    return (area - gauss).flatten()
def atom_refine(image, atoms, radius, max_int=0, min_int=0, max_dist=4):
    """Fits a Gaussian in a blob of an image

    Parameters
    ----------
    image: np.array or sidpy Dataset
    atoms: list or np.array
        positions of atoms
    radius: float
        radius of circular mask to define fitting of Gaussian
    max_int: float
        optional - maximum intensity to be considered for fitting (to exclude contaminated areas for example);
        a value <= 0 disables the upper bound
    min_int: float
        optional - minimum intensity to be considered for fitting (to exclude contaminated holes for example)
    max_dist: float
        optional - maximum distance of movement of Gaussian during fitting

    Returns
    -------
    sym: dict
        dictionary containing new atom positions ('atoms') and other output
        such as the intensity of the fitted Gaussian ('gauss_intensity')
    """
    rr = int(radius + 0.5)  # atom radius in whole pixels
    print('using radius ', rr, 'pixels')

    # Circular mask of radius rr centred in a (2*rr+1)x(2*rr+1) window
    pixels = np.linspace(0, 2 * rr, 2 * rr + 1) - rr
    grid_x, grid_y = np.meshgrid(pixels, pixels)
    mask = (grid_x ** 2 + grid_y ** 2) < rr ** 2

    # Initial Gaussian guess: [width, x0, y0, intensity]
    guess = [rr * 2, 0.0, 0.0, 1]

    sym = {'number_of_atoms': len(atoms)}

    volume = []
    position = []
    intensities = []
    maximum_area = []
    new_atoms = []
    gauss_width = []
    gauss_amplitude = []
    gauss_intensity = []

    for i in trange(len(atoms)):
        x = int(atoms[i][0])
        y = int(atoms[i][1])
        area = image[x - rr:x + rr + 1, y - rr:y + rr + 1]

        # Atoms whose window sticks out of the image cannot be evaluated
        out_of_bounds = ((x - rr) < 0 or (y - rr) < 0
                         or x + rr + 1 > image.shape[0] or y + rr + 1 > image.shape[1])
        if out_of_bounds:
            position.append(-1)
            intensities.append(-1.)
            maximum_area.append(-1.)
        else:
            position.append(1)
            intensities.append((area * mask).sum())
            maximum_area.append((area * mask).max())

        # Fit only blobs whose integrated intensity lies within
        # ]min_int, max_int[ (max_int <= 0 disables the upper bound)
        total = area.sum()
        append = total > min_int and (max_int <= 0 or total < max_int)

        pout = [0, 0, 0, 0]
        if append and not out_of_bounds:
            # args must be a tuple; gauss_difference receives (params, area)
            [pout, _] = optimization.leastsq(gauss_difference, guess, args=(area,))
            # Reject fits that drifted too far from the blob centre
            if (abs(pout[1]) > max_dist) or (abs(pout[2]) > max_dist):
                pout = [0, 0, 0, 0]

        volume.append(2 * np.pi * pout[3] * pout[0] * pout[0])
        new_atoms.append([x + pout[1], y + pout[2]])
        if all(v == 0 for v in pout):
            # Failed/skipped fit
            gauss_intensity.append(0.)
        else:
            gauss = probe_tools.make_gauss(area.shape[0], area.shape[1], width=pout[0],
                                           x0=pout[1], y0=pout[2], intensity=pout[3])
            gauss_intensity.append((gauss * mask).sum())
        gauss_width.append(pout[0])
        gauss_amplitude.append(pout[3])

    sym['inside'] = position
    sym['intensity_area'] = intensities
    sym['maximum_area'] = maximum_area
    sym['atoms'] = new_atoms
    sym['gauss_width'] = gauss_width
    sym['gauss_amplitude'] = gauss_amplitude
    sym['gauss_intensity'] = gauss_intensity
    sym['gauss_volume'] = volume

    return sym
def intensity_area(image, atoms, radius):
    """
    Integrated intensity of each atom: the image is summed over a circular
    mask of the given radius centred at the atom position; atoms whose mask
    window does not fit inside the image get -1.
    """
    rr = int(radius + 0.5)  # mask radius in whole pixels
    print('using radius ', rr, 'pixels')

    offsets = np.linspace(0, 2 * rr, 2 * rr + 1) - rr
    col_grid, row_grid = np.meshgrid(offsets, offsets)
    disk = np.asarray(col_grid ** 2 + row_grid ** 2 < rr ** 2)

    intensities = []
    for atom in atoms:
        # NOTE: index order follows the original convention --
        # atom[1] addresses the first image axis, atom[0] the second
        row = int(atom[1])
        col = int(atom[0])
        window = image[row - rr:row + rr + 1, col - rr:col + rr + 1]
        if window.shape == disk.shape:
            intensities.append((window * disk).sum())
        else:
            # Window truncated at the image border: cannot be evaluated
            intensities.append(-1)
    return intensities
| pycroscopy/pycroscopy | pycroscopy/image/image_atoms.py | Python | mit | 7,225 | [
"Gaussian"
] | 642f98fa5934fbc95717459a2a2c8c648090de4c508aa7e2b6e29d90926ee471 |
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
iPOPO component factories repository
:author: Thomas Calmant
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Documentation strings format
__docformat__ = "restructuredtext en"
# Boot module version
__version__ = "1.0.0"
# ------------------------------------------------------------------------------
# Repository beans
import cohorte.repositories
from cohorte.repositories.beans import Factory
# Pelix
from pelix.utilities import is_string
from pelix.ipopo.decorators import ComponentFactory, Provides, Invalidate, \
Property, Requires, Validate
# Standard library
import ast
import logging
import threading
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
class ComponentFactoryVisitor(ast.NodeVisitor):
    """
    AST visitor extracting the iPOPO factory names declared through the
    @ComponentFactory decorator, without importing the inspected module
    """
    # pylint: disable=invalid-name
    def __init__(self):
        """
        Sets up the visitor
        """
        ast.NodeVisitor.__init__(self)
        # Factory names found so far
        self.factories = set()
        # Module-level string constants (name -> value), used to resolve
        # factory names given as constants
        self.values = {}

    def generic_visit(self, node):
        """
        Custom default visit method that avoids to visit further than the
        module level.
        """
        if isinstance(node, ast.Module):
            ast.NodeVisitor.generic_visit(self, node)

    def visit_ClassDef(self, node):
        """
        Found a class definition: look for a @ComponentFactory decorator
        """
        for decorator in node.decorator_list:
            # Only handle calls to a plain "ComponentFactory" name;
            # getattr avoids an AttributeError on attribute decorators
            # such as @module.ComponentFactory(...)
            func = getattr(decorator, 'func', None)
            if getattr(func, 'id', None) != "ComponentFactory":
                continue

            argument = None
            if getattr(decorator, 'args', None):
                # Name: first positional argument
                argument = decorator.args[0]
            else:
                # FIX: keyword arguments live in Call.keywords;
                # the former code read Call.kwargs, which holds the
                # **kwargs node and never contains the factory name
                for keyword in getattr(decorator, 'keywords', None) or []:
                    if keyword.arg == 'name':
                        argument = keyword.value
                        break

            if argument is None:
                # No name given: use the iPOPO default name
                name = "{0}Factory".format(node.name)
            elif hasattr(argument, 'id'):
                # Constant: resolve through previously seen assignments
                name = self.values.get(argument.id)
                if name is None:
                    _logger.debug("Factory name '%s' is unknown (%s)",
                                  argument.id, node.name)
            else:
                # Literal
                try:
                    name = ast.literal_eval(argument)
                except (ValueError, SyntaxError) as ex:
                    name = None
                    _logger.debug("Invalid factory name for class %s: %s",
                                  node.name, ex)

            if name is not None:
                # Store the factory name
                self.factories.add(name)

    def visit_Assign(self, node):
        """
        Found an assignment: remember module-level string constants
        """
        field = getattr(node.targets[0], 'id', None)
        if field:
            try:
                value = ast.literal_eval(node.value)
                if is_string(value):
                    self.values[field] = value
            except (ValueError, SyntaxError):
                # Not a literal: ignore
                pass
def _extract_module_factories(filename):
    """
    Extract the iPOPO factory names declared in the given Python file,
    using a static AST visit (the module is never imported)

    :param filename: Path to the file to parse
    :return: The set of factory names found in the file
    :raise ValueError: Unreadable or unparsable file
    """
    visitor = ComponentFactoryVisitor()
    try:
        with open(filename) as filep:
            source = filep.read()
    except (OSError, IOError) as ex:
        raise ValueError("Error reading {0}: {1}".format(filename, ex))

    try:
        module = ast.parse(source, filename, 'exec')
    except (ValueError, SyntaxError) as ex:
        raise ValueError("Error parsing {0}: {1}".format(filename, ex))

    try:
        visitor.visit(module)
    except Exception as ex:
        raise ValueError("Error visiting {0}: {1}".format(filename, ex))

    return visitor.factories
# ------------------------------------------------------------------------------
@ComponentFactory("cohorte-repository-factories-ipopo-factory")
@Provides(cohorte.repositories.SERVICE_REPOSITORY_FACTORIES,
          controller="_controller")
@Requires('_repositories', cohorte.repositories.SERVICE_REPOSITORY_ARTIFACTS,
          True, False,
          "({0}=python)".format(cohorte.repositories.PROP_REPOSITORY_LANGUAGE))
@Property('_model', cohorte.repositories.PROP_FACTORY_MODEL, "ipopo")
@Property('_language', cohorte.repositories.PROP_REPOSITORY_LANGUAGE, "python")
class IPopoRepository(object):
    """
    Represents a repository of iPOPO component factories, indexed from the
    injected Python artifact repositories
    """
    def __init__(self):
        """
        Sets up the repository
        """
        # Properties
        self._model = 'ipopo'
        self._language = 'python'

        # Service controller (set once the initial loading is done)
        self._controller = False

        # Injected service: Python artifact repositories
        self._repositories = []

        # Factory name -> [Factory beans]
        self._factories = {}

        # Artifact -> [Factory beans]
        self._artifacts = {}

        # Guards access to both indexes
        self.__lock = threading.RLock()

    def __contains__(self, item):
        """
        Tests if the given item is in the repository

        :param item: Item to be tested
        :return: True if the item is in the repository
        """
        if isinstance(item, Factory):
            # Test artifact model
            if item.model != self._model:
                return False

            # Test if the name is in the factories
            return item.name in self._factories

        elif item in self._factories:
            # Item matches a factory name
            return True

        # No match
        return False

    def __len__(self):
        """
        Length of a repository <=> number of individual factories
        """
        return sum((len(factories) for factories in self._factories.values()))

    def add_artifact(self, artifact):
        """
        Adds the factories provided by the given artifact

        :param artifact: A Python Module artifact
        :raise ValueError: Unreadable file
        """
        with self.__lock:
            # Extract factories (static AST parse of the module file)
            names = _extract_module_factories(artifact.file)
            artifact_list = self._artifacts.setdefault(artifact, [])
            for name in names:
                # Make the bean
                factory = Factory(name, self._language, self._model, artifact)

                # Factory index
                factory_list = self._factories.setdefault(name, [])
                if factory not in factory_list:
                    factory_list.append(factory)

                # Artifact index
                if factory not in artifact_list:
                    artifact_list.append(factory)

    def clear(self):
        """
        Clears the repository content
        """
        with self.__lock:
            self._artifacts.clear()
            self._factories.clear()

    def find_factories(self, factories):
        """
        Returns the list of artifacts that provides the given factories

        :param factories: A list of iPOPO factory names
        :return: A tuple ({Name -> [Artifacts]}, [Not found factories])
        """
        with self.__lock:
            factories_set = set(factories)
            resolution = {}
            unresolved = set()

            if not factories:
                # Nothing to do...
                return resolution, factories_set

            for name in factories_set:
                try:
                    # Get the list of factories for this name
                    factories = self._factories[name]
                    providers = resolution.setdefault(name, [])
                    providers.extend(factory.artifact for factory in factories)
                except KeyError:
                    # Factory name not found
                    unresolved.add(name)

            # Sort the artifacts (newest/highest first)
            for artifacts in resolution.values():
                artifacts.sort(reverse=True)

            return resolution, unresolved

    def find_factory(self, factory, artifact_name=None, artifact_version=None):
        """
        Find the artifacts that provides the given factory, filtered by name
        and version.

        :return: The list of artifacts providing the factory, sorted by name
                 and version
        :raise KeyError: Unknown factory
        """
        with self.__lock:
            # Copy the list of artifacts for this factory
            artifacts = [factory.artifact
                         for factory in self._factories[factory]]
            if artifact_name is not None:
                # Artifact must be selected
                # Prepare the version bean
                version = cohorte.repositories.beans.Version(artifact_version)

                # Filter results
                artifacts = [artifact for artifact in artifacts
                             if artifact.name == artifact_name
                             and version.matches(artifact.version)]
                if not artifacts:
                    # No match found
                    raise KeyError("No matching artifact for {0} -> {1} {2}"
                                   .format(factory, artifact_name, version))

            # Sort results (newest/highest first)
            artifacts.sort(reverse=True)
            return artifacts

    def get_language(self):
        """
        Retrieves the language of the artifacts stored in this repository
        """
        return self._language

    def get_model(self):
        """
        Retrieves the component model that can handle the factories of this
        repository
        """
        return self._model

    def load_repositories(self):
        """
        Loads the factories according to the repositories
        """
        with self.__lock:
            if not self._repositories:
                # No repository
                return

            # Walk through artifacts
            for repository in self._repositories:
                for artifact in repository.walk():
                    self.add_artifact(artifact)

    def __initial_loading(self):
        """
        Initial repository loading; flips the service controller once done
        """
        self.load_repositories()
        self._controller = True

    @Validate
    def validate(self, context):
        """
        Component validated
        """
        self._controller = False

        # Load repositories in another thread (can be slow: parses every
        # artifact file), the service is published once done
        threading.Thread(target=self.__initial_loading,
                         name="iPOPO-repository-loader").start()

    @Invalidate
    def invalidate(self, context):
        """
        Component invalidated
        """
        self.clear()
| isandlaTech/cohorte-demos | led/dump/led-demo-yun/cohorte/dist/cohorte-1.0.0-20141216.234517-57-python-distribution/repo/cohorte/repositories/python/ipopo.py | Python | apache-2.0 | 11,559 | [
"VisIt"
] | 13f7e24601443ee78a6d4b51e5fd553f3925f7ccb002935162fff49a4e08a112 |
#!/usr/bin/env python
import sys, os, time, ConfigParser, shutil
from datetime import datetime, timedelta
from time import strftime
from optparse import OptionParser
new_path = [ os.path.join( os.getcwd(), "lib" ) ]
new_path.extend( sys.path[1:] ) # remove scripts/ from the path
sys.path = new_path
from galaxy import eggs
import galaxy.model.mapping
import pkg_resources
pkg_resources.require( "SQLAlchemy >= 0.4" )
from galaxy.model.orm import and_, eagerload
assert sys.version_info[:2] >= ( 2, 4 )
def main():
parser = OptionParser()
parser.add_option( "-d", "--days", dest="days", action="store", type="int", help="number of days (60)", default=60 )
parser.add_option( "-r", "--remove_from_disk", action="store_true", dest="remove_from_disk", help="remove datasets from disk when purged", default=False )
parser.add_option( "-i", "--info_only", action="store_true", dest="info_only", help="info about the requested action", default=False )
parser.add_option( "-1", "--delete_userless_histories", action="store_true", dest="delete_userless_histories", default=False, help="delete userless histories and datasets" )
parser.add_option( "-2", "--purge_histories", action="store_true", dest="purge_histories", default=False, help="purge deleted histories" )
parser.add_option( "-3", "--purge_datasets", action="store_true", dest="purge_datasets", default=False, help="purge deleted datasets" )
parser.add_option( "-4", "--purge_libraries", action="store_true", dest="purge_libraries", default=False, help="purge deleted libraries" )
parser.add_option( "-5", "--purge_folders", action="store_true", dest="purge_folders", default=False, help="purge deleted library folders" )
( options, args ) = parser.parse_args()
ini_file = args[0]
if not ( options.purge_folders ^ options.delete_userless_histories ^ \
options.purge_libraries ^ options.purge_histories ^ \
options.purge_datasets ):
parser.print_help()
sys.exit(0)
if options.remove_from_disk and options.info_only:
parser.error( "remove_from_disk and info_only are mutually exclusive" )
conf_parser = ConfigParser.ConfigParser( {'here':os.getcwd()} )
conf_parser.read( ini_file )
configuration = {}
for key, value in conf_parser.items( "app:main" ):
configuration[key] = value
if 'database_connection' in configuration:
database_connection = configuration['database_connection']
else:
database_connection = "sqlite:///%s?isolation_level=IMMEDIATE" % configuration["database_file"]
file_path = configuration['file_path']
app = CleanupDatasetsApplication( database_connection=database_connection, file_path=file_path )
cutoff_time = datetime.utcnow() - timedelta( days=options.days )
now = strftime( "%Y-%m-%d %H:%M:%S" )
print "\n# %s - Handling stuff older than %i days\n" % ( now, options.days )
if options.info_only:
print "# Displaying info only ( --info_only )\n"
elif options.remove_from_disk:
print "# Datasets will be removed from disk.\n"
else:
print "# Datasets will NOT be removed from disk.\n"
if options.delete_userless_histories:
delete_userless_histories( app, cutoff_time, info_only = options.info_only )
elif options.purge_histories:
purge_histories( app, cutoff_time, options.remove_from_disk, info_only = options.info_only )
elif options.purge_datasets:
purge_datasets( app, cutoff_time, options.remove_from_disk, info_only = options.info_only )
elif options.purge_libraries:
purge_libraries( app, cutoff_time, options.remove_from_disk, info_only = options.info_only )
elif options.purge_folders:
purge_folders( app, cutoff_time, options.remove_from_disk, info_only = options.info_only )
sys.exit(0)
def delete_userless_histories( app, cutoff_time, info_only = False ):
    # Deletes userless histories whose update_time value is older than the cutoff_time.
    # The purge history script will handle marking DatasetInstances as deleted.
    # Nothing is removed from disk yet.
    # With info_only=True the candidate histories are only listed, not modified.
    history_count = 0
    print '# The following datasets and associated userless histories have been deleted'
    start = time.clock()
    histories = app.model.History.filter( and_( app.model.History.table.c.user_id==None,
                                                app.model.History.table.c.deleted==False,
                                                app.model.History.table.c.update_time < cutoff_time ) ).all()# \
    for history in histories:
        if not info_only:
            history.deleted = True
        print "%d" % history.id
        history_count += 1
    # Single flush after the loop persists all the deleted flags at once.
    app.model.flush()
    stop = time.clock()
    print "# Deleted %d histories.\n" % ( history_count )
    print "Elapsed time: ", stop - start, "\n"
def purge_histories( app, cutoff_time, remove_from_disk, info_only = False ):
    # Purges deleted histories whose update_time is older than the cutoff_time.
    # The dataset associations of each history are also marked as deleted.
    # The Purge Dataset method will purge each Dataset as necessary
    # history.purged == True simply means that it can no longer be undeleted
    # i.e. all associated datasets are marked as deleted
    history_count = 0
    print '# The following datasets and associated deleted histories have been purged'
    start = time.clock()
    histories = app.model.History.filter( and_( app.model.History.table.c.deleted==True,
                                                app.model.History.table.c.purged==False,
                                                app.model.History.table.c.update_time < cutoff_time ) ) \
                         .options( eagerload( 'datasets' ) ).all()
    for history in histories:
        for dataset_assoc in history.datasets:
            _purge_dataset_instance( dataset_assoc, app, remove_from_disk, info_only = info_only ) #mark a DatasetInstance as deleted, clear associated files, and mark the Dataset as deleted if it is deletable
        if not info_only:
            # TODO: should the Delete DefaultHistoryPermissions be deleted here?  This was incorrectly
            # done in the _list_delete() method of the history controller, so copied it here.  Not sure
            # if we should ever delete info like this from the db though, so commented out for now...
            #for dhp in history.default_permissions:
            #    dhp.delete()
            history.purged = True
        print "%d" % history.id
        history_count += 1
        # Flush once per history so a crash mid-run loses at most one item.
        app.model.flush()
    stop = time.clock()
    print '# Purged %d histories.' % ( history_count ), '\n'
    print "Elapsed time: ", stop - start, "\n"
def purge_libraries( app, cutoff_time, remove_from_disk, info_only = False ):
    # Purges deleted libraries whose update_time is older than the cutoff_time.
    # The dataset associations of each library are also marked as deleted.
    # The Purge Dataset method will purge each Dataset as necessary
    # library.purged == True simply means that it can no longer be undeleted
    # i.e. all associated LibraryDatasets/folders are marked as deleted
    library_count = 0
    print '# The following libraries and associated folders have been purged'
    start = time.clock()
    libraries = app.model.Library.filter( and_( app.model.Library.table.c.deleted==True,
                                                app.model.Library.table.c.purged==False,
                                                app.model.Library.table.c.update_time < cutoff_time ) ).all()
    for library in libraries:
        # Recursively purge the library's folder tree first.
        _purge_folder( library.root_folder, app, remove_from_disk, info_only = info_only )
        if not info_only:
            library.purged = True
        print "%d" % library.id
        library_count += 1
        app.model.flush()
    stop = time.clock()
    print '# Purged %d libraries .' % ( library_count ), '\n'
    print "Elapsed time: ", stop - start, "\n"
def purge_folders( app, cutoff_time, remove_from_disk, info_only = False ):
    # Purges deleted folders whose update_time is older than the cutoff_time.
    # The dataset associations of each folder are also marked as deleted.
    # The Purge Dataset method will purge each Dataset as necessary
    # libraryFolder.purged == True simply means that it can no longer be undeleted
    # i.e. all associated LibraryDatasets/folders are marked as deleted
    folder_count = 0
    print '# The following folders have been purged'
    start = time.clock()
    folders = app.model.LibraryFolder.filter( and_( app.model.LibraryFolder.table.c.deleted==True,
                                                    app.model.LibraryFolder.table.c.purged==False,
                                                    app.model.LibraryFolder.table.c.update_time < cutoff_time ) ).all()
    for folder in folders:
        # _purge_folder flushes each folder itself, so no app.model.flush()
        # is performed here (unlike the sibling purge_* functions).
        _purge_folder( folder, app, remove_from_disk, info_only = info_only )
        print "%d" % folder.id
        folder_count += 1
    stop = time.clock()
    print '# Purged %d folders.' % ( folder_count ), '\n'
    print "Elapsed time: ", stop - start, "\n"
def purge_datasets( app, cutoff_time, remove_from_disk, info_only = False ):
# Purges deleted datasets whose update_time is older than cutoff_time. Files may or may
# not be removed from disk.
dataset_count = 0
disk_space = 0
print '# The following deleted datasets have been purged'
start = time.clock()
datasets = app.model.Dataset.filter( and_( app.model.Dataset.table.c.deleted==True,
app.model.Dataset.table.c.purgable==True,
app.model.Dataset.table.c.purged==False,
app.model.Dataset.table.c.update_time < cutoff_time ) ).all()
for dataset in datasets:
file_size = dataset.file_size
_purge_dataset( dataset, remove_from_disk, info_only = info_only )
dataset_count += 1
try:
disk_space += file_size
except:
pass
stop = time.clock()
print '# %d datasets purged\n' % dataset_count
if remove_from_disk:
print '# Freed disk space: ', disk_space, '\n'
print "Elapsed time: ", stop - start, "\n"
def _purge_dataset_instance( dataset_instance, app, remove_from_disk, include_children = True, info_only = False ):
    #purging a dataset instance marks the instance as deleted,
    #and marks the dataset as deleted if it is not associated with another DatsetInstance that is not deleted
    if not info_only:
        dataset_instance.mark_deleted( include_children = include_children )
        dataset_instance.clear_associated_files()
        dataset_instance.flush()
        # refresh so the deletability check below sees the flushed state
        dataset_instance.dataset.refresh()
    if _dataset_is_deletable( dataset_instance.dataset ):
        _delete_dataset( dataset_instance.dataset, app, remove_from_disk, info_only = info_only )
    #need to purge children here
    if include_children:
        for child in dataset_instance.children:
            _purge_dataset_instance( child, app, remove_from_disk, include_children = include_children, info_only = info_only )
def _dataset_is_deletable( dataset ):
#a dataset is deletable when it no longer has any non-deleted associations
return not bool( dataset.active_history_associations or dataset.active_library_associations )
def _delete_dataset( dataset, app, remove_from_disk, info_only = False ):
    #marks a base dataset as deleted, hdas/ldas associated with dataset can no longer be undeleted
    #metadata files attached to associated dataset Instances is removed now
    if not _dataset_is_deletable( dataset ):
        print "# This Dataset (%i) is not deletable, associated Metadata Files will not be removed.\n" % ( dataset.id )
    else:
        # Mark all associated MetadataFiles as deleted and purged and remove them from disk
        metadata_files = []
        #lets create a list of metadata files, then perform actions on them
        for hda in dataset.history_associations:
            for metadata_file in app.model.MetadataFile.filter( app.model.MetadataFile.table.c.hda_id==hda.id ).all():
                metadata_files.append( metadata_file )
        for lda in dataset.library_associations:
            for metadata_file in app.model.MetadataFile.filter( app.model.MetadataFile.table.c.lda_id==lda.id ).all():
                metadata_files.append( metadata_file )
        for metadata_file in metadata_files:
            # NOTE(review): this header line is printed once per metadata
            # file because it sits inside the loop.
            print "# The following metadata files attached to associations of Dataset '%s' have been purged:" % dataset.id
            if not info_only:
                if remove_from_disk:
                    try:
                        os.unlink( metadata_file.file_name )
                    except Exception, e:
                        print "# Error, exception: %s caught attempting to purge metadata file %s\n" %( str( e ), metadata_file.file_name )
                    metadata_file.purged = True
                metadata_file.deleted = True
                #metadata_file.flush()
            print "%s" % metadata_file.file_name
        print
        dataset.deleted = True
        app.model.flush()
def _purge_dataset( dataset, remove_from_disk, info_only = False ):
    # Fully purges a previously-deleted dataset: optionally removes its file
    # and extra-files directory from disk, then marks it purged and flushes.
    if dataset.deleted:
        try:
            if dataset.purgable and _dataset_is_deletable( dataset ):
                print "%s" % dataset.file_name
                if not info_only:
                    # Remove files from disk and update the database
                    if remove_from_disk:
                        # TODO: should permissions on the dataset be deleted here?
                        os.unlink( dataset.file_name )
                        # Remove associated extra files from disk if they exist
                        if dataset.extra_files_path and os.path.exists( dataset.extra_files_path ):
                            shutil.rmtree( dataset.extra_files_path ) #we need to delete the directory and its contents; os.unlink would always fail on a directory
                    dataset.purged = True
                    dataset.flush()
            else:
                print "# This dataset (%i) is not purgable, the file (%s) will not be removed.\n" % ( dataset.id, dataset.file_name )
        except OSError, exc:
            # File already gone on disk: still record the dataset as purged.
            print "# Error, file has already been removed: %s" % str( exc )
            dataset.purged = True
            dataset.flush()
        except Exception, exc:
            print "# Error, exception: %s caught attempting to purge %s\n" %( str( exc ), dataset.file_name )
    else:
        print "# Error: '%s' has not previously been deleted, so it cannot be purged\n" % dataset.file_name
    print ""
def _purge_folder( folder, app, remove_from_disk, info_only = False ):
    """Purges a folder and its contents, recursively"""
    for ld in folder.datasets:
        ld.deleted = True
        # Purge the active LDDA plus any expired (replaced) versions.
        for ldda in [ld.library_dataset_dataset_association] + ld.expired_datasets:
            _purge_dataset_instance( ldda, app, remove_from_disk, info_only = info_only ) #mark a DatasetInstance as deleted, clear associated files, and mark the Dataset as deleted if it is deletable
    for sub_folder in folder.folders:
        _purge_folder( sub_folder, app, remove_from_disk, info_only = info_only )
    if not info_only:
        # TODO: should the folder permissions be deleted here?
        folder.purged = True
        folder.flush()
class CleanupDatasetsApplication( object ):
    """Encapsulates the state of a Universe application"""
    def __init__( self, database_connection=None, file_path=None ):
        # Printed to aid debugging of cron-driven invocations.
        print >> sys.stderr, "python path is: " + ", ".join( sys.path )
        if database_connection is None:
            raise Exception( "CleanupDatasetsApplication requires a database_connection value" )
        if file_path is None:
            raise Exception( "CleanupDatasetsApplication requires a file_path value" )
        self.database_connection = database_connection
        self.file_path = file_path
        # Setup the database engine and ORM
        self.model = galaxy.model.mapping.init( self.file_path, self.database_connection, engine_options={}, create_tables=False )
# Script entry point: run only when invoked directly, not on import.
if __name__ == "__main__":
    main()
| dbcls/dbcls-galaxy | scripts/cleanup_datasets/cleanup_datasets.py | Python | mit | 16,195 | [
"Galaxy"
] | 24872fb8acac2cc27bbef893a81ddce1aba13559598c57d046a1e0488e655c95 |
# -*- coding: utf-8 -*-
#
# connections.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# create connectivity figures for topology manual
import nest
import nest.topology as tp
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
def beautify_layer(l, fig=None, xlabel=None, ylabel=None,
                   xlim=None, ylim=None, xticks=None, yticks=None, dx=0, dy=0):
    """Decorate the axes showing layer ``l``: equal aspect ratio, grid,
    per-row/column ticks and limits covering the layer extent.

    Assume either x and ylims/ticks given or none.
    """
    # Fix: the old default ``fig=plt.gcf()`` was evaluated once at import
    # time and therefore always referred to whichever figure was current
    # when the module was loaded.  Resolve the current figure at call time.
    if fig is None:
        fig = plt.gcf()
    top = nest.GetStatus(l)[0]['topology']
    ctr = top['center']
    ext = top['extent']
    if xticks is None:  # fix: "== None" misbehaves for array-valued ticks
        if 'rows' in top:
            dx = float(ext[0]) / top['columns']
            dy = float(ext[1]) / top['rows']
            xticks = ctr[0]-ext[0]/2.+dx/2. + dx*np.arange(top['columns'])
            yticks = ctr[1]-ext[1]/2.+dy/2. + dy*np.arange(top['rows'])
    if xlim is None:
        xlim = [ctr[0]-ext[0]/2.-dx/2., ctr[0]+ext[0]/2.+dx/2.]  # extra space so extent is visible
        ylim = [ctr[1]-ext[1]/2.-dy/2., ctr[1]+ext[1]/2.+dy/2.]
    else:
        ext = [xlim[1]-xlim[0], ylim[1]-ylim[0]]
    ax = fig.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.set_aspect('equal', 'box')
    ax.set_xticks(xticks)
    ax.set_yticks(yticks)
    ax.grid(True)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    return
def conn_figure(fig, layer, connd, targets=None, showmask=True, showkern=False,
                xticks=range(-5,6),yticks=range(-5,6),
                xlim=[-5.5,5.5],ylim=[-5.5,5.5]):
    """Plot ``layer`` and the targets of one or more source nodes.

    ``targets`` is a sequence of (node, color) pairs; by default the layer's
    center element is used.  ``connd`` supplies the mask/kernel to overlay.
    """
    if targets is None:  # fix: identity test instead of "== None"
        targets=((tp.FindCenterElement(layer),'red'),)
    tp.PlotLayer(layer, fig=fig, nodesize=60)
    for src, clr in targets:
        if showmask:
            mask = connd['mask']
        else:
            mask = None
        if showkern:
            kern = connd['kernel']
        else:
            kern = None
        tp.PlotTargets(src, layer, fig=fig, mask=mask, kernel=kern,
                       src_size=250, tgt_color=clr, tgt_size=20,
                       kernel_color='green')
    beautify_layer(layer, fig,
                   xlim=xlim,ylim=ylim,xticks=xticks,yticks=yticks,
                   xlabel='', ylabel='')
    fig.gca().grid(False)
# -----------------------------------------------
# Simple connection
# The "#{ ... #}" markers delimit snippets that are extracted verbatim into
# the Topology user manual; keep them (and the code between them) intact.
#{ conn1 #}
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11.,11.],
                    'elements': 'iaf_neuron'})
conndict = {'connection_type': 'divergent',
            'mask': {'rectangular': {'lower_left' : [-2.,-1.],
                                     'upper_right': [ 2., 1.]}}}
tp.ConnectLayers(l, l, conndict)
#{ end #}
# Left panel: free boundary conditions; right panel: periodic boundaries.
fig = plt.figure()
fig.add_subplot(121)
conn_figure(fig, l, conndict,
            targets=((tp.FindCenterElement(l),'red'),
                     (tp.FindNearestElement(l, [4.,5.]),'yellow')))
# same another time, with periodic bcs
lpbc = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11.,11.],
                       'elements': 'iaf_neuron', 'edge_wrap': True})
tp.ConnectLayers(lpbc, lpbc, conndict)
fig.add_subplot(122)
conn_figure(fig, lpbc, conndict, showmask=False,
            targets=((tp.FindCenterElement(lpbc),'red'),
                     (tp.FindNearestElement(lpbc, [4.,5.]),'yellow')))
plt.savefig('../user_manual_figures/conn1.png', bbox_inches='tight')
def free_mask_fig(fig, loc, cdict):
    """Reset the kernel, build an 11x11 grid layer, connect it to itself
    with ``cdict`` and draw the result into subplot ``loc`` of ``fig``."""
    nest.ResetKernel()
    l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11.,11.],
                        'elements': 'iaf_neuron'})
    tp.ConnectLayers(l, l, cdict)
    fig.add_subplot(loc)
    conn_figure(fig, l, cdict, xticks=range(-5,6,2), yticks=range(-5,6,2))
# Manual figure conn2: the six free-mask shapes (rectangle, circle, doughnut,
# each plain and with an anchor offset), one subplot per variant.
fig = plt.figure()
#{ conn2r #}
conndict = {'connection_type': 'divergent',
            'mask': {'rectangular': {'lower_left' : [-2.,-1.],
                                     'upper_right': [ 2., 1.]}}}
#{ end #}
free_mask_fig(fig, 231, conndict)
#{ conn2ro #}
conndict = {'connection_type': 'divergent',
            'mask': {'rectangular': {'lower_left' : [-2.,-1.],
                                     'upper_right': [ 2., 1.]},
                     'anchor': [-1.5, -1.5]}}
#{ end #}
free_mask_fig(fig, 234, conndict)
#{ conn2c #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 2.0}}}
#{ end #}
free_mask_fig(fig, 232, conndict)
#{ conn2co #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 2.0},
                     'anchor': [-2.0,0.0]}}
#{ end #}
free_mask_fig(fig, 235, conndict)
#{ conn2d #}
conndict = {'connection_type': 'divergent',
            'mask': {'doughnut': {'inner_radius': 1.5,
                                  'outer_radius': 3.}}}
#{ end #}
free_mask_fig(fig, 233, conndict)
#{ conn2do #}
conndict = {'connection_type': 'divergent',
            'mask': {'doughnut': {'inner_radius': 1.5,
                                  'outer_radius': 3.},
                     'anchor': [1.5,1.5]}}
#{ end #}
free_mask_fig(fig, 236, conndict)
plt.savefig('../user_manual_figures/conn2.png', bbox_inches='tight')
def conn_figure_3d(fig, layer, connd, targets=None, showmask=True, showkern=False,
                   xticks=range(-5,6),yticks=range(-5,6),
                   xlim=[-5.5,5.5],ylim=[-5.5,5.5]):
    """3D counterpart of conn_figure: plot ``layer`` and the targets of
    the given (node, color) pairs, overlaying mask/kernel from ``connd``."""
    if targets is None:  # fix: identity test instead of "== None"
        targets=((tp.FindCenterElement(layer),'red'),)
    tp.PlotLayer(layer, fig=fig, nodesize=20, nodecolor=(.5,.5,1.))
    for src, clr in targets:
        if showmask:
            mask = connd['mask']
        else:
            mask = None
        if showkern:
            kern = connd['kernel']
        else:
            kern = None
        tp.PlotTargets(src, layer, fig=fig, mask=mask, kernel=kern,
                       src_size=250, tgt_color=clr, tgt_size=60,
                       kernel_color='green')
    ax = fig.gca()
    ax.set_aspect('equal', 'box')
    plt.draw()
def free_mask_3d_fig(fig, loc, cdict):
    """Reset the kernel, build an 11x11x11 grid layer, connect it with
    ``cdict`` and draw it into 3D subplot ``loc`` of ``fig``."""
    nest.ResetKernel()
    l = tp.CreateLayer({'rows': 11, 'columns': 11, 'layers': 11, 'extent': [11.,11.,11.],
                        'elements': 'iaf_neuron'})
    tp.ConnectLayers(l, l, cdict)
    fig.add_subplot(loc,projection='3d')
    conn_figure_3d(fig, l, cdict, xticks=range(-5,6,2), yticks=range(-5,6,2))
# Manual figure conn_3d: box and spherical masks in a 3D layer.
fig = plt.figure()
#{ conn_3d_a #}
conndict = {'connection_type': 'divergent',
            'mask': {'box': {'lower_left' : [-2.,-1.,-1.],
                             'upper_right': [ 2., 1., 1.]}}}
#{ end #}
free_mask_3d_fig(fig, 121, conndict)
#{ conn_3d_b #}
conndict = {'connection_type': 'divergent',
            'mask': {'spherical': {'radius': 2.5}}}
#{ end #}
free_mask_3d_fig(fig, 122, conndict)
plt.savefig('../user_manual_figures/conn_3d.png', bbox_inches='tight')
# -----------------------------------------------
# grid masks
def grid_mask_fig(fig, loc, cdict):
    """Like free_mask_fig, but drawn without the mask outline (grid masks
    are defined in grid coordinates and cannot be drawn by PlotTargets)."""
    nest.ResetKernel()
    l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11.,11.],
                        'elements': 'iaf_neuron'})
    tp.ConnectLayers(l, l, cdict)
    fig.add_subplot(loc)
    conn_figure(fig, l, cdict, xticks=range(-5,6,2), yticks=range(-5,6,2),
                showmask=False)
# Manual figure conn3: grid masks, plain and with positive/negative anchors.
fig = plt.figure()
#{ conn3 #}
conndict = {'connection_type': 'divergent',
            'mask': {'grid': {'rows': 3, 'columns': 5}}}
#{ end #}
grid_mask_fig(fig, 131, conndict)
#{ conn3c #}
conndict = {'connection_type': 'divergent',
            'mask': {'grid': {'rows': 3, 'columns': 5},
                     'anchor': {'row': 1, 'column': 2}}}
#{ end #}
grid_mask_fig(fig, 132, conndict)
#{ conn3x #}
conndict = {'connection_type': 'divergent',
            'mask': {'grid': {'rows': 3, 'columns': 5},
                     'anchor': {'row': -1, 'column': 2}}}
#{ end #}
grid_mask_fig(fig, 133, conndict)
plt.savefig('../user_manual_figures/conn3.png', bbox_inches='tight')
# -----------------------------------------------
# free masks
def kernel_fig(fig, loc, cdict, showkern=True):
    """Like free_mask_fig, but additionally overlays the connection kernel
    (probability profile) unless ``showkern`` is False."""
    nest.ResetKernel()
    l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11.,11.],
                        'elements': 'iaf_neuron'})
    tp.ConnectLayers(l, l, cdict)
    fig.add_subplot(loc)
    conn_figure(fig, l, cdict, xticks=range(-5,6,2), yticks=range(-5,6,2),
                showkern=showkern)
# Manual figure conn4: constant, Gaussian, anchored, cut-off and 2D-Gaussian
# kernels inside a circular mask.
fig = plt.figure()
#{ conn4cp #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 4.}},
            'kernel': 0.5}
#{ end #}
kernel_fig(fig, 231, conndict)
#{ conn4g #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 4.}},
            'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.}}}
#{ end #}
kernel_fig(fig, 232, conndict)
#{ conn4gx #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 4.}, 'anchor': [1.5,1.5]},
            'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.,
                                    'anchor': [1.5,1.5]}}}
#{ end #}
kernel_fig(fig, 233, conndict)
plt.draw()
#{ conn4cut #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 4.}},
            'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.,
                                    'cutoff': 0.5}}}
#{ end #}
kernel_fig(fig, 234, conndict)
#{ conn42d #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 4.}},
            'kernel': {'gaussian2D': {'p_center': 1.0,
                                      'sigma_x': 1., 'sigma_y': 3.}}}
#{ end #}
kernel_fig(fig, 235, conndict, showkern=False)
plt.savefig('../user_manual_figures/conn4.png', bbox_inches='tight')
# -----------------------------------------------
import numpy as np
def wd_fig(fig, loc, ldict, cdict, what, rpos=None,
           xlim=[-1,51], ylim=[0,1], xticks=range(0,51,5),
           yticks=np.arange(0.,1.1,0.2), clr='blue',
           label=''):
    """Build a fresh layer from ``ldict``, connect it with ``cdict`` and
    scatter-plot the connection property ``what`` ('weight' or 'delay')
    against target x-position, for the node nearest ``rpos`` (or the
    first node when ``rpos`` is None)."""
    nest.ResetKernel()
    l = tp.CreateLayer(ldict)
    tp.ConnectLayers(l, l, cdict)
    ax = fig.add_subplot(loc)
    if rpos is None:  # fix: identity test instead of "== None"
        rn = nest.GetLeaves(l)[0][:1] # first node
    else:
        rn = tp.FindNearestElement(l, rpos)
    conns = nest.GetConnections(rn)
    cstat = nest.GetStatus(conns)
    vals = np.array([sd[what] for sd in cstat])
    tgts = [sd['target'] for sd in cstat]
    locs = np.array(tp.GetPosition(tgts))
    ax.plot(locs[:,0], vals, 'o', mec='none', mfc=clr, label=label)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.set_xticks(xticks)
    ax.set_yticks(yticks)
# Manual figure conn5: distance-dependent weight/delay profiles (linear,
# linear with periodic boundaries, exponential, Gaussian, uniform).
fig = plt.figure()
#{ conn5lin #}
ldict = {'rows': 1, 'columns': 51,
         'extent': [51.,1.], 'center': [25.,0.],
         'elements': 'iaf_neuron'}
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5,-0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}},
         'delays': {'linear': {'c': 0.1, 'a': 0.02}}}
#{ end #}
wd_fig(fig, 311, ldict, cdict, 'weight', label='Weight')
wd_fig(fig, 311, ldict, cdict, 'delay' , label='Delay', clr='red')
fig.gca().legend()
lpdict = {'rows': 1, 'columns': 51, 'extent': [51.,1.], 'center': [25.,0.],
          'elements': 'iaf_neuron', 'edge_wrap': True}
#{ conn5linpbc #}
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5,-0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}},
         'delays': {'linear': {'c': 0.1, 'a': 0.02}}}
#{ end #}
wd_fig(fig, 312, lpdict, cdict, 'weight', label='Weight')
wd_fig(fig, 312, lpdict, cdict, 'delay' , label='Delay', clr='red')
fig.gca().legend()
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5,-0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}}}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Linear',
       rpos=[25.,0.], clr='orange')
#{ conn5exp #}
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5,-0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'exponential': {'a': 1., 'tau': 5.}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Exponential',
       rpos=[25.,0.])
#{ conn5gauss #}
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5,-0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'gaussian': {'p_center': 1., 'sigma': 5.}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Gaussian', clr='green',rpos=[25.,0.])
#{ conn5uniform #}
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5,-0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'uniform': {'min': 0.2, 'max': 0.8}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Uniform', clr='red',rpos=[25.,0.])
fig.gca().legend()
plt.savefig('../user_manual_figures/conn5.png', bbox_inches='tight')
# --------------------------------
def pn_fig(fig, loc, ldict, cdict,
           xlim=[0.,.5], ylim=[0,3.5], xticks=range(0,51,5),
           yticks=np.arange(0.,1.1,0.2), clr='blue',
           label=''):
    # Build a fresh network from ldict/cdict and histogram the source-target
    # distances against the analytical connection-probability curve.
    nest.ResetKernel()
    l = tp.CreateLayer(ldict)
    tp.ConnectLayers(l, l, cdict)
    ax = fig.add_subplot(loc)
    rn = nest.GetLeaves(l)[0]
    conns = nest.GetConnections(rn)
    cstat = nest.GetStatus(conns)
    srcs = [sd['source'] for sd in cstat]
    tgts = [sd['target'] for sd in cstat]
    dist = np.array(tp.Distance(srcs,tgts))
    ax.hist(dist, bins=50, histtype='stepfilled',normed=True)
    r=np.arange(0.,0.51,0.01)
    # Analytical profile for the linear kernel used in the conn6 example.
    plt.plot(r, 2*np.pi*r*(1-2*r)*12/np.pi,'r-',lw=3,zorder=-10)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    # NOTE(review): the triple-quoted string below is deliberately disabled
    # code (a bare string expression, not a docstring); left untouched.
    """ax.set_xticks(xticks)
    ax.set_yticks(yticks)"""
    # ax.set_aspect(100, 'box')
    ax.set_xlabel('Source-target distance d')
    ax.set_ylabel('Connection probability pconn(d)')
# Manual figure conn6: fixed fan-out with a linear kernel on a layer of
# 1000 randomly placed neurons; distances checked against theory in pn_fig.
fig = plt.figure()
#{ conn6 #}
pos = [[np.random.uniform(-1.,1.),np.random.uniform(-1.,1.)]
       for j in range(1000)]
ldict = {'positions': pos, 'extent': [2.,2.],
         'elements': 'iaf_neuron', 'edge_wrap': True}
cdict = {'connection_type': 'divergent',
         'mask': {'circular': {'radius': 1.0}},
         'kernel': {'linear': {'c': 1., 'a': -2., 'cutoff': 0.0}},
         'number_of_connections': 50,
         'allow_multapses': True, 'allow_autapses': False}
#{ end #}
pn_fig(fig, 111, ldict, cdict)
plt.savefig('../user_manual_figures/conn6.png', bbox_inches='tight')
# ----------------------------
# Manual snippets conn7-conn10: composite layers with source/target model
# selection, custom synapse models, and stimulation/recording device layers.
#{ conn7 #}
nest.ResetKernel()
nest.CopyModel('iaf_neuron', 'pyr')
nest.CopyModel('iaf_neuron', 'in')
ldict = {'rows': 10, 'columns': 10, 'elements': ['pyr', 'in']}
cdict_p2i = {'connection_type': 'divergent',
             'mask': {'circular': {'radius': 0.5}},
             'kernel': 0.8,
             'sources': {'model': 'pyr'},
             'targets': {'model': 'in'}}
cdict_i2p = {'connection_type': 'divergent',
             'mask': {'rectangular': {'lower_left': [-0.2,-0.2],
                                      'upper_right':[0.2,0.2]}},
             'sources': {'model': 'in'},
             'targets': {'model': 'pyr'}}
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict_p2i)
tp.ConnectLayers(l, l, cdict_i2p)
#{ end #}
# ----------------------------
#{ conn8 #}
nest.ResetKernel()
nest.CopyModel('iaf_neuron', 'pyr')
nest.CopyModel('iaf_neuron', 'in')
nest.CopyModel('static_synapse', 'exc', {'weight': 2.0})
nest.CopyModel('static_synapse', 'inh', {'weight': -8.0})
ldict = {'rows': 10, 'columns': 10, 'elements': ['pyr', 'in']}
cdict_p2i = {'connection_type': 'divergent',
             'mask': {'circular': {'radius': 0.5}},
             'kernel': 0.8,
             'sources': {'model': 'pyr'},
             'targets': {'model': 'in'},
             'synapse_model': 'exc'}
cdict_i2p = {'connection_type': 'divergent',
             'mask': {'rectangular': {'lower_left': [-0.2,-0.2],
                                      'upper_right':[0.2,0.2]}},
             'sources': {'model': 'in'},
             'targets': {'model': 'pyr'},
             'synapse_model': 'inh'}
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict_p2i)
tp.ConnectLayers(l, l, cdict_i2p)
#{ end #}
# ----------------------------
#{ conn9 #}
nrns = tp.CreateLayer({'rows'    : 20,
                       'columns' : 20,
                       'elements': 'iaf_neuron'})
stim = tp.CreateLayer({'rows'    : 1,
                       'columns' : 1,
                       'elements': 'poisson_generator'})
cdict_stim = {'connection_type': 'divergent',
              'mask'           : {'circular': {'radius': 0.1},
                                  'anchor': [0.2, 0.2]}}
tp.ConnectLayers(stim, nrns, cdict_stim)
#{ end #}
# ----------------------------
#{ conn10 #}
rec = tp.CreateLayer({'rows'    : 1,
                      'columns' : 1,
                      'elements': 'spike_detector'})
cdict_rec = {'connection_type': 'convergent',
             'mask'           : {'circular': {'radius': 0.1},
                                 'anchor': [-0.2, 0.2]}}
tp.ConnectLayers(nrns, rec, cdict_rec)
#{ end #}
| zifeo/nest-simulator | topology/doc/user_manual_scripts/connections.py | Python | gpl-2.0 | 18,038 | [
"Gaussian"
] | 63afe2e5ad46ed52477faf07ddf5140ee94d7f05ff31c6f5d82a6a1d06584473 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
# CreateTime: 2016-09-21 16:51:48
import numpy as np
from Bio import SeqIO, Seq, SeqUtils
#from Bio.SeqUtils.CodonUsage import CodonAdaptationIndex
from Bio.SeqUtils import GC
from Bio.SeqUtils.CodonUsage import SynonymousCodons
import math
from math import log, sqrt
from collections import Counter
import pickle
from sklearn import cross_validation, metrics # Additional scklearn functions
#from sklearn.grid_search import GridSearchCV # Perforing grid search
from keras import backend as K
def f1_score(y_true, y_pred):
    """Batch-wise F1 score (harmonic mean of precision and recall) built
    from Keras backend ops, usable as a Keras metric."""
    def recall(y_true, y_pred):
        """Recall metric.
        Only computes a batch-wise average of recall.
        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall
    def precision(y_true, y_pred):
        """Precision metric.
        Only computes a batch-wise average of precision.
        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision
    # NOTE: the assignments below rebind the inner function names to their
    # computed tensor results; the functions are not callable afterwards.
    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    return 2*((precision*recall)/(precision+recall))
##########################################################################
# overlap of two gene
##########################################################################
overlap = lambda s0, e0, s1, e1: min(e0, e1) - max(s0, s1) + 1
##########################################################################
# share kmer between 2 sequence
##########################################################################
def pearson(x, y):
    """Pearson correlation coefficient of two equal-length sequences.

    Returns 0 when either sequence has zero variance (the denominator
    would be zero).  Raises AssertionError on length mismatch (kept from
    the original interface) and ZeroDivisionError on empty input.
    """
    N, M = len(x), len(y)
    assert N == M
    x_m = sum(x) * 1. / N
    y_m = sum(y) * 1. / M
    a, b, c = 0., 0., 0.
    # zip() replaces index-based iteration (and Python-2-only xrange).
    for xv, yv in zip(x, y):
        xi, yi = xv - x_m, yv - y_m
        a += xi * yi
        b += xi ** 2
        c += yi ** 2
    try:
        return a / sqrt(b * c)
    except ZeroDivisionError:
        # Fix: catch only the expected error instead of a bare except.
        return 0
def sharekmer(s1, s2):
    """Codon-usage similarity of two coding sequences.

    For every amino acid with at least two synonymous codons, take the most
    frequent codon in each sequence; whenever both sequences prefer the same
    codon, that codon's counts contribute to a Pearson correlation of the
    two codon-frequency vectors.
    """
    n1, n2 = len(s1), len(s2)
    k1 = [s1[i: i + 3] for i in range(0, n1, 3)]
    # Bug fix: the second k-mer list was built from s1, so the function
    # always compared the first sequence with itself.
    k2 = [s2[i: i + 3] for i in range(0, n2, 3)]
    fq1 = Counter(k1)
    fq2 = Counter(k2)
    kmers = []
    for aa in SynonymousCodons:
        codons = SynonymousCodons[aa]
        if len(codons) < 2:
            continue
        # Most frequent synonymous codon in each sequence (ties resolved by
        # max() as in the original: first-seen wins).
        best1 = max([[fq1[c], c] for c in codons], key=lambda p: p[0])
        best2 = max([[fq2[c], c] for c in codons], key=lambda p: p[0])
        if best1[1] == best2[1]:
            kmers.append(best1[1])
    vec1 = [fq1[k] for k in kmers]
    vec2 = [fq2[k] for k in kmers]
    return pearson(vec1, vec2)
##########################################################################
# the motif found
##########################################################################
box_up10 = ['TATAAT', [77, 76, 60, 61, 56, 82]]
box_up35 = ['TTGACA', [69, 79, 61, 56, 54, 54]]
# find the best region that may be a candidate of a motif
def find_motif(seq, motif, bg=None):
    """Find the best-scoring window of ``seq`` for a PWM-like motif.

    ``motif`` is [consensus, per-position match percentages] (see box_up10 /
    box_up35 above); ``bg`` maps base -> background frequency (default 1).
    Returns [best window, distance from sequence end to window start, score].
    Note: if ``seq`` is shorter than the motif the loop never runs and the
    result is meaningless (idx stays -1), as in the original.
    """
    if bg is None:
        bg = {}
    l = len(motif[0])
    # Fix: restore -inf as the initial best.  The previous sentinel (-100)
    # could exceed every real window score, leaving idx == -1 and returning
    # a bogus slice.
    best = float('-inf')
    idx = -1
    for i in range(0, len(seq) - l + 1):
        lmer = seq[i: i + l]
        score = 0
        for a, b, c in zip(lmer, motif[0], motif[1]):
            if a == b:
                score += log(float(c) / bg.get(a, 1.))
            else:
                score += log((100. - c) / bg.get(a, 1.))
        # ">=" keeps the original tie-breaking: later windows win ties.
        if score >= best:
            idx = i
            best = score
    return [seq[idx: idx + l], len(seq) - idx, best]
##########################################################################
# cai, from biopython
##########################################################################
# relative adaptiveness of each codon; codons absent from the table are the
# stop codons TGA/TAA/TAG
index = Counter({'GCT': 1, 'CGT': 1, 'AAC': 1, 'GAC': 1, 'TGC': 1, 'CAG': 1, 'GAA': 1, 'GGT': 1, 'CAC': 1, 'ATC': 1, 'CTG': 1, 'AAA': 1, 'ATG': 1, 'TTC': 1, 'CCG': 1, 'TCT': 1, 'ACC': 1, 'TGG': 1, 'TAC': 1, 'GTT': 1, 'ACT': 0.965, 'TCC': 0.744, 'GGC': 0.724, 'GCA': 0.586, 'TGT': 0.5, 'GTA': 0.495, 'GAT': 0.434, 'GCG': 0.424, 'AGC': 0.41, 'CGC': 0.356, 'TTT': 0.296, 'CAT': 0.291, 'GAG': 0.259,
                 'AAG': 0.253, 'TAT': 0.239, 'GTG': 0.221, 'ATT': 0.185, 'CCA': 0.135, 'CAA': 0.124, 'GCC': 0.122, 'ACG': 0.099, 'AGT': 0.085, 'TCA': 0.077, 'ACA': 0.076, 'CCT': 0.07, 'GTC': 0.066, 'AAT': 0.051, 'CTT': 0.042, 'CTC': 0.037, 'TTA': 0.02, 'TTG': 0.02, 'GGG': 0.019, 'TCG': 0.017, 'CCC': 0.012, 'GGA': 0.01, 'CTA': 0.007, 'AGA': 0.004, 'CGA': 0.004, 'CGG': 0.004, 'ATA': 0.003, 'AGG': 0.002})


def cai(seq):
    """Codon Adaptation Index of an in-frame coding sequence.

    Geometric mean of the relative adaptiveness (module-level ``index``) of
    the codons, skipping ATG/TGG (single-codon families, always weight 1)
    and any codon not in the table (stop codons, partial codons).  Returns
    0 when no informative codon is present.
    """
    if seq.islower():
        seq = seq.upper()
    N = len(seq)
    cai_value, cai_length = 0, 0
    for i in range(0, N, 3):
        codon = seq[i: i + 3]
        # the original's elif/else branches both just continued, so the net
        # behaviour is simply: count only informative table codons
        if codon in index and codon not in ['ATG', 'TGG']:
            cai_value += math.log(index[codon])
            cai_length += 1
    if cai_length > 0:
        return math.exp(cai_value / cai_length)
    else:
        return 0
##########################################################################
# get the features
##########################################################################
# convert ATCG based kmer number
#code = {'A': 1, 'a': 1, 'T': 2, 't': 2, 'G': 3, 'g': 3, 'C': 4, 'c': 4}
# digit tables indexed by ord(char): `code` maps A/T/G/C (either case) to
# 0..3; `code5` maps them to 1..4 so that 0 is free for unknown characters
code = [0] * 256
code5 = [0] * 256
for flag, base in enumerate('ATGC'):
    code[ord(base.lower())] = code[ord(base)] = flag
    code5[ord(base.lower())] = code5[ord(base)] = flag + 1


# convert string to number
def s2n(s, code=code, scale=None):
    """Interpret string *s* as a base-*scale* number, most significant
    character first, using the per-character digit table *code*."""
    if scale is None:  # `is None`, not `== None`
        scale = max(code) + 1
    N = 0
    output = 0
    for i in s[::-1]:
        output += code[ord(i)] * scale ** N
        N += 1
    return output


# reverse of s2n
def n2s(n, length, alpha='ATGC', scale=None):
    """Inverse of s2n: render *n* as *length* base-*scale* digits over the
    alphabet *alpha*, most significant character first."""
    if scale is None:
        scale = max(code) + 1
    N = n
    s = []
    for _ in range(length):
        s.append(alpha[N % scale])
        # bug fix: `N /= scale` is float division under Python 3 and would
        # index alpha with a non-integer remainder; floor division is the
        # intended operation on both Python versions
        N //= scale
    return ''.join(s[::-1])
# convert the dna sequence to kmer-position matrix.
# if length of dna < given, then add NNN in the center of the sequence.
# else if length of dna > given, then trim the center of the sequence.
# the new kpm, reshape
def kpm(S, d=64, k=3, code=code, scale=None):
    """Encode sequence *S* as a binary (kmer x frame) by position matrix.

    The sequence is first centre-trimmed (d < len) or centre-padded with
    'N' (d > len) to exactly *d* characters.  Row index is
    kmer_number + frame * scale**k (frame = position % 3); column index is
    position // 3.  Windows containing N/n stay all-zero.

    Returns an int8 numpy array of shape (3 * scale**k, d // 3).
    """
    if scale is None:  # `is None`, not `== None`
        scale = max(code) + 1
    N = scale ** k  # number of distinct k-mers
    assert isinstance(d, int)
    L = len(S)
    if d < L:
        # keep both ends, drop the middle
        F = d // 2
        R = d - F
        seq = ''.join([S[: F], S[-R:]])
    elif d > L:
        # pad the middle with Ns
        F = L // 2
        R = L - F
        seq = ''.join([S[: F], 'N' * (d - L), S[-R:]])
    else:
        seq = S
    mat = [[0] * (d // 3) for elem in range(N * 3)]
    for i in range(0, d - k + 1):
        kmer = seq[i: i + k]
        if 'N' in kmer or 'n' in kmer:
            continue
        R = s2n(kmer, code=code, scale=scale)
        mat[R + i % 3 * N][i // 3] = 1
    mat = np.asarray(mat, 'int8')
    return mat
# get features by give loc1, start and end:
# get xx
def get_xx(j, seq_dict, kmer=2, dim=128, mode='train', context=False):
    """Build the feature groups for one adjacent-gene-pair record.

    j is the tab-split record: (locus, scaffold, strand, start, end) for
    each of the two genes in the first 10 fields.  seq_dict maps scaffold
    name to a Biopython sequence record (sliceable, .reverse_complement(),
    .seq).  Returns (X0, X1, X2): a one-element list holding the kmer
    matrix, the list of scalar features, and the intergenic kmer-number
    sequence.  `mode` is accepted but never read in this function.
    """
    loc1, scf1, std1, st1, ed1, loc2, scf2, std2, st2, ed2 = j[: 10]
    if scf1 != scf2 or std1 != std2:
        # different scaffold or strand: emit sentinel features instead
        if context:
            X0 = np.ones((4 ** kmer * 3, dim // 3 * 3))
        else:
            X0 = np.ones((4 ** kmer * 3, dim // 3))
        X1 = [10**4] * 11
        X2 = [127] * dim
        return [X0], X1, X2
    # get the sequence
    st1, ed1, st2, ed2 = map(int, [st1, ed1, st2, ed2])
    st1 -= 1  # 1-based inclusive start -> 0-based slice start
    st2 -= 1
    if st1 > st2:
        # order the pair by genomic start so gene 1 is always upstream
        loc1, scf1, std1, st1, ed1, loc2, scf2, std2, st2, ed2 = loc2, scf2, std2, st2, ed2, loc1, scf1, std1, st1, ed1
    seq1 = seq_dict[scf1][st1: ed1]
    # std1 == std2 is guaranteed by the early return above, so using std1
    # for all three slices is correct
    seq1 = std1 == '+' and seq1 or seq1.reverse_complement()
    seq2 = seq_dict[scf2][st2: ed2]
    seq2 = std1 == '+' and seq2 or seq2.reverse_complement()
    start, end = ed1, st2  # intergenic region between the two genes
    seq12 = seq_dict[scf1][start: end]
    seq12 = std1 == '+' and seq12 or seq12.reverse_complement()
    seq1, seq2, seq12 = map(str, [seq1.seq, seq2.seq, seq12.seq])
    seq1, seq2, seq12 = seq1.upper(), seq2.upper(), seq12.upper()
    # 1D features such as gc, dist
    cai1, cai2, cai12 = map(cai, [seq1, seq2, seq12])  # cai12 is never used below
    dist = st2 - ed1  # NOTE(review): computed but unused; distn is used instead
    distn = (st2 - ed1) * 1. / (ed2 - st1)
    ratio = math.log((ed1 - st1) * 1. / (ed2 - st2))
    ratio = std1 == '+' and ratio or -ratio
    idx = -100  # only the last 100 bp of the intergenic region are scanned
    bgs = Counter(seq12[idx:])
    up10, up35 = find_motif(seq12[idx:], box_up10, bgs), find_motif(
        seq12[idx:], box_up35, bgs)
    if seq12[idx:]:
        gc = SeqUtils.GC(seq12[idx:])
        try:
            skew = SeqUtils.GC_skew(seq12[idx:])[0]
        except:
            skew = 0.
    else:
        gc = skew = 0.
    bias = sharekmer(seq1, seq2)
    # NOTE(review): st1/st2 are ints here, so `st1 == st2 == '+'` is always
    # False and the first branch is dead — this probably meant
    # `std1 == '+'`; confirm intent before changing
    if st1 == st2 == '+':
        X1 = [cai1, cai2, bias, distn, ratio, gc, skew] + up10[1:] + up35[1:]
    else:
        X1 = [cai2, cai1, bias, distn, ratio, gc, skew] + up10[1:] + up35[1:]
    # 2D features of kmer matrix
    if context:
        seqmat12 = kpm(seq12, d=dim, k=kmer, scale=4)
        seqmat1 = kpm(seq1, d=dim, k=kmer, scale=4)
        seqmat2 = kpm(seq2, d=dim, k=kmer, scale=4)
        seqmat = np.concatenate((seqmat1, seqmat12, seqmat2), 1)
    else:
        seqmat = kpm(seq12, d=dim, k=kmer, scale=4)
    if ed1 > st2:
        # overlapping genes: blank the matrix (no real intergenic region)
        seqmat[:] = 0
    X0 = [seqmat]
    n12 = len(seq12)
    X2 = [s2n(seq12[elem: elem + kmer], code5)
          for elem in xrange(n12 - kmer + 1)]
    return X0, X1, X2
# get single line of features
def get_xx_one(j, seq_dict, kmer = 2, dim = 128, mode = 'train'):
    """Feature-extract a single record and add a leading batch axis.

    Wraps get_xx and returns each feature group as a numpy array of
    shape (1, ...).
    """
    X0, X1, X2 = get_xx(j, seq_dict, kmer, dim, mode)
    x0, x1, x2 = map(np.asarray, [[X0], [X1], [X2]])
    # bug fix: the third value returned was the raw list X2 instead of the
    # numpy array x2 built on the previous line
    return x0, x1, x2
# generate training and testing data
def get_xxy(f, seq_dict, kmer = 2, dim = 128):
    """Read tab-separated gene-pair records from iterable *f* and return
    the stacked feature arrays (X0, X1, X2) plus the 0/1 label vector y
    (last column equal to the string 'True' means 1)."""
    X0, X1, X2, y = [], [], [], []
    for line in f:
        fields = line[:-1].split('\t')
        feat2d, feat1d, featseq = get_xx(fields, seq_dict, kmer, dim)
        X0.append(feat2d)
        X1.append(feat1d)
        X2.append(featseq)
        y.append(1 if fields[-1] == 'True' else 0)
    X0 = np.asarray(X0, 'int8')
    X1 = np.asarray(X1, 'float32')
    X2 = np.asarray(X2)
    y = np.asarray(y, 'int8')
    return X0, X1, X2, y
# split the X0, X1, y data to training and testing
def split_xxy(X0, X1, X2, y, train_size=1. / 3, seed=42):
    """Shuffle the samples with a fixed seed and split every array into a
    training part (first train_size fraction) and a testing part.

    Returns the eight arrays: X0/X1/X2/y for train, then for test.
    """
    n_samples = X0.shape[0]
    order = np.arange(n_samples)
    np.random.seed(seed)
    np.random.shuffle(order)
    cut = int(train_size * n_samples)
    idx_train, idx_test = order[: cut], order[cut:]
    train = [arr[idx_train] for arr in (X0, X1, X2, y)]
    test = [arr[idx_test] for arr in (X0, X1, X2, y)]
    return (train[0], train[1], train[2], train[3],
            test[0], test[1], test[2], test[3])
##########################################################################
# the CNN class
##########################################################################
class CNN:
    """Thin wrapper around a Keras VGG-like 2D convolutional classifier
    used for operon prediction (Python 2 / Keras 1.x API)."""

    def __init__(self, nb_filter=64, nb_pool=3, nb_conv=2, nb_epoch=10, batch_size=64, maxlen=128, save_path='./weights.hdf5'):
        # training hyper-parameters; save_path receives the best checkpoint
        self.nb_filter = nb_filter
        self.nb_pool = nb_pool
        self.nb_conv = nb_conv
        self.nb_epoch = nb_epoch
        self.batch_size = batch_size
        self.maxlen = maxlen
        self.opt = Adam(lr=5e-4, beta_1=0.995, beta_2=0.999, epsilon=1e-09)
        # checkpoint keeps the model with the best validation fbeta score
        self.checkpointer = [ModelCheckpoint(filepath=save_path, verbose=1, save_best_only=True, mode='max', monitor='val_fbeta_score')]
        self.metric = keras.metrics.fbeta_score
        #self.metric = f1_score
        self.cross_val = 1 / 3.  # validation_split used when no test set given
    def fit_2d(self, X_train, y_train, X_test=None, y_test=None):
        """Build and train the conv net on one-hot labels; keeps the
        fitted model on self.model_2d."""
        Y_train = np_utils.to_categorical(y_train)
        if type(y_test) == type(None):
            Y_test = None
        else:
            Y_test = np_utils.to_categorical(y_test)
        nb_classes = Y_train.shape[1]
        # set parameter for cnn
        loss = nb_classes > 2 and 'categorical_crossentropy' or 'binary_crossentropy'
        print 'loss function is', loss
        # number of convolutional filters to use
        nb_filters = self.nb_filter
        # size of pooling area for max pooling
        nb_pool = self.nb_pool
        # convolution kernel size
        nb_conv = self.nb_conv
        # traning iteration
        nb_epoch = self.nb_epoch
        batch_size = self.batch_size
        a, b, img_rows, img_cols = X_train.shape
        # set the conv model
        model = Sequential()
        model.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='same', input_shape=(b, img_rows, img_cols), activation='relu', name='conv1_1'))
        #model.add(Conv2D(64, (2, 2), padding="same", activation="relu", name="conv1_1", input_shape=(1, 192, 4)))
        model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))
        model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))
        model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))
        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
        model.add(Flatten())
        model.add(Dense(512, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(nb_classes, activation='sigmoid'))
        # NOTE(review): self.opt (the tuned Adam above) is bound here but the
        # compile call uses the default 'adam' string instead — confirm which
        # optimizer is intended
        opt = self.opt
        model.compile(loss=loss, optimizer='adam', metrics=[self.metric])
        # set the check pointer to save the best model
        if type(X_test) != type(None) and type(Y_test) != type(None):
            # NOTE(review): both validation_data and validation_split are
            # passed; Keras gives validation_data precedence
            model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1,
                      validation_data=(X_test, Y_test), shuffle=True, validation_split=1e-4, callbacks=self.checkpointer)
        else:
            model.fit(X_train, Y_train, batch_size=batch_size,
                      nb_epoch=nb_epoch, verbose=1, shuffle=True, validation_split = self.cross_val, callbacks=self.checkpointer)
        self.model_2d = model
    def predict_2d(self, X):
        # argmax over class probabilities -> predicted class indices
        return self.model_2d.predict(X).argmax(1)
    # load an training model
    def load(self, name, mode='2d'):
        """Load a saved Keras model into self.model_2d (only mode '2d' is
        implemented)."""
        model = keras.models.load_model(name)
        if mode == '2d':
            self.model_2d = model
        else:
            pass
    # save the model
    def save(self, name, model='2d'):
        """Save self.model_2d to `name + '_' + model` (only '2d')."""
        if model == '2d':
            self.model_2d.save(name+'_'+model)
        else:
            pass
# run training
def run_train(train, seq_dict, clf, mode='2d'):
    """Train *clf* on the records in file *train*, then print test-set
    score, precision, recall and F1.

    NOTE(review): if mode != '2d' the X/y variables are never assigned and
    the fit call below raises NameError — confirm other modes were planned.
    """
    # get the training data
    split_rate = 1. / 3
    if mode == '2d':
        f = open(train, 'r')
        X, X1, X2, y = get_xxy(f, seq_dict, 3, 128)
        X_train, X1_train, X2_train, y_train, X_test, X1_test, X2_test, y_test = split_xxy(
            X, X1, X2, y, split_rate)
        f.close()
    clf.fit_2d(X_train, y_train, X_test, y_test)
    # the test score
    Y_test = np_utils.to_categorical(y_test)
    score = clf.model_2d.evaluate(X_test, Y_test, verbose=0)
    print('   Test score:', score[0])
    print('Test accuracy:', score[1])
    # validate
    y_test_pred = clf.predict_2d(X_test)
    #clf.save(train, mode)
    precise = metrics.precision_score(y_test, y_test_pred)
    recall = metrics.recall_score(y_test, y_test_pred)
    f1 = metrics.f1_score(y_test, y_test_pred)
    print 'Precise:', precise
    print ' Recall:', recall
    print '     F1:', f1
# run the adjacent prediction
def run_adjacent_predict(adjacent, seq_dict, model, clf, mode='2d'):
    """Predict operon membership for each adjacent gene pair listed in the
    *adjacent* file and print each input line with True/False appended.

    NOTE(review): the first line below overwrites the `adjacent` and
    `model` arguments from sys.argv, and `seq_dict` is rebuilt from the
    global `fasta` — the parameters passed by the caller are effectively
    ignored; confirm whether this was intended.
    """
    adjacent, model = sys.argv[3: 5]
    seq_dict = SeqIO.to_dict(SeqIO.parse(fasta, 'fasta'))
    clf.load(model, mode)
    # get the locus of genes
    f = open(adjacent, 'r')
    for i in f:
        j = i[:-1].split('\t')
        x0, x1, x2 = get_xx_one(j, seq_dict, 3, 128, 'test')
        # print 'data shape', x0.shape, x1.shape
        if mode == '2d':
            res = clf.predict_2d(x0)[0]
        else:
            pass
        res = res == 1 and 'True' or 'False'
        print i[: -1] + '\t' + str(res)
    f.close()
# run the whole genome prediction
# generate adjacent gene pairs from the gene list
def adjacent_genes(f):
    """Parse a tab-separated gene table from iterable *f* into
    [locus, scaffold, strand, start, end] rows (start/end as ints),
    sorted by (scaffold, strand, start, end)."""
    genes = []
    for line in f:
        fields = line[: -1].split('\t')
        if len(fields) < 7:
            # short rows are padded so the slice below never underflows
            fields.extend([0] * 7)
        locus, scaf, strand, start, end = fields[: 5]
        genes.append([locus, scaf, strand, int(start), int(end)])
    genes.sort(key=lambda g: g[1: 5])
    return genes
def run_genome_predict(genome, seq_dict, model, clf, mode='2d'):
    """Pair consecutive genes across the whole genome file and print each
    pair with a True/False operon prediction appended.

    NOTE(review): as in run_adjacent_predict, the first two lines below
    overwrite the `genome`/`model` parameters from sys.argv and rebuild
    seq_dict from the global `fasta`; the caller's arguments are ignored.
    """
    genome, model = sys.argv[3: 5]
    seq_dict = SeqIO.to_dict(SeqIO.parse(fasta, 'fasta'))
    clf.load(model, mode)
    # get the locus of genes
    f = open(genome, 'r')
    locus_list = adjacent_genes(f)
    f.close()
    for a, b in zip(locus_list[: -1], locus_list[1:]):
        j = a + b
        x0, x1, x2 = get_xx_one(j, seq_dict, 3, 128, 'test')
        if mode == '2d':
            # only predict for pairs on the same scaffold and strand
            if a[1] == b[1] and a[2] == b[2]:
                res = clf.predict_2d(x0)[0]
            else:
                res = 0
        else:
            pass
        res = res == 1 and 'True' or 'False'
        i = '\t'.join(map(str, j))
        print i + '\t' + str(res)
if __name__ == '__main__':
    import sys
    # not enough arguments: print the usage banner and exit
    if len(sys.argv[1:]) < 3:
        print '#' * 79
        print '# To train a model:'
        print '#' * 79
        print 'python this.py train foo.fasta foo.train.txt [mode]\n'
        print 'foo.train.txt is the gene location in the format:'
        print '    locus1\tscf1\tstrand1\tstart1\tend1\tlocus2\tscf2\tstrand2\tstart2\tend2\tcat\n'
        print '#' * 79
        print '# To make a adjacent genes prediction'
        print '#' * 79
        print 'python this.py adjacent foo.fasta foo.adjacent.txt foo.model [mode]\n'
        print 'foo.adjacent.txt is the gene location in the format:'
        print '    locus1\tscf1\tstrand1\tstart1\tend1\tlocus2\tscf2\tstrand2\tstart2\tend2\n'
        print '#' * 79
        print '# To make a whole genome prediction'
        print '#' * 79
        print 'python this.py genome foo.fasta foo.genome.txt foo.model [mode]'
        print 'foo.genome.txt is the gene location in the format:'
        print '    locus1\tscf1\tstrand1\tstart1\tend1'
        print ''
        print '#' * 79
        print 'start1/2: start of the gene in the genome, start > 0 need be adjust in the program'
        print '     cat: indicate whether operon or not'
        print '    mode: 2d'
        raise SystemExit()
    # heavy imports are deferred until after the usage check so --help-style
    # invocations stay fast
    import keras
    from keras.models import Sequential
    from keras.preprocessing import sequence
    from keras.layers import Dense, Dropout, Activation, Flatten, Embedding
    from keras.layers import Input, Merge, LSTM, GRU, Bidirectional, UpSampling2D, InputLayer
    from keras.optimizers import SGD, Adam, RMSprop
    from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, Conv2D
    from keras.utils import np_utils
    from keras.callbacks import ModelCheckpoint, TensorBoard
    from keras.models import Model
    from keras import backend as K
    from keras import objectives
    from keras.layers import Input, Dense, Lambda
    import numpy as np
    # first CLI argument selects the sub-command, second is the genome fasta
    model, fasta = sys.argv[1: 3]
    # save the genome to an dict
    seq_dict = SeqIO.to_dict(SeqIO.parse(fasta, 'fasta'))
    if model.startswith('train'):
        train = sys.argv[3]
        try:
            mode = sys.argv[4]
        except:
            mode = '2d'
        clf = CNN(nb_epoch = 16, maxlen = 128, save_path = train + '_' + mode + '.hdf5')
        run_train(train, seq_dict, clf, mode)
    elif model.startswith('predict'):
        if len(sys.argv[1:]) < 4:
            print '#' * 79
            print '# To make a adjacent genes prediction'
            print '#' * 79
            print 'python this.py predict foo.fasta foo.adjacent.txt foo.model\n'
            print 'foo.adjacent.txt is the gene location in the format:'
            print '    locus1\tscf1\tstrand1\tstart1\tend1\tlocus2\tscf2\tstrand2\tstart2\tend2\n'
            raise SystemExit()
        test, model = sys.argv[3: 5]
        try:
            mode = sys.argv[5]
        except:
            mode = '2d'
        clf = CNN(nb_epoch = 128, maxlen = 128)
        # determine the number of col
        f = open(test, 'r')
        header = f.next().split('\t')
        f.close()
        # a header containing two strand columns means paired (adjacent)
        # records; otherwise treat the file as a whole-genome gene list
        if header.count('+') + header.count('-') > 1:
            run_adjacent_predict(test, seq_dict, model, clf, mode)
        else:
            run_genome_predict(test, seq_dict, model, clf, mode)
    else:
        pass
| Rinoahu/POEM | deprecate/lib/deep_operon.py | Python | gpl-3.0 | 21,697 | [
"Biopython"
] | 75e884a1b853240736e658c78f04e528f48d407f13826ca8a8a84e2178b5d3e5 |
from paraview.simple import *
import os
import numpy as np
# ParaView macro: style the active WEC-Sim result source into named blocks
# (waves, ground, bodies, mooring) in the active render view.
model = GetActiveSource()
renderView1 = GetActiveViewOrCreate('RenderView')
renderView1.CameraParallelProjection = 1
Hide(model, renderView1)
# waves
extractBlock1 = ExtractBlock(Input=model)
extractBlock1.BlockIndices = [1, 2]
extractBlock1Display = Show(extractBlock1, renderView1)
extractBlock1Display.ColorArrayName = [None, '']
extractBlock1Display.DiffuseColor = [0.0, 0.0, 1.0]
extractBlock1Display.Opacity = 0.75
RenameSource('waves', extractBlock1)
# derive a scalar "elevation" array from the wave surface z coordinate
calculator1 = Calculator(Input=extractBlock1)
calculator1.Function = 'coordsZ'
calculator1.ResultArrayName = 'elevation'
RenameSource('elevation', calculator1)
# ground
tmp = model.GetProperty('FileName')
# NOTE(review): `dir` shadows the builtin; the [1:-2] slice strips the
# wrapping characters of the property's string representation — confirm it
# holds for multi-file sources
dir = os.path.dirname(str(tmp)[1:-2])
filename = dir + os.sep + 'ground.txt'
f = open(filename,'r')
# ground.txt rows: horizontal extent, water depth, MoorDyn flag
ground = np.loadtxt(f)
f.close()
gextent = ground[0]
gdepth = -ground[1]
MoorDyn = ground[2]  # numeric flag, used as a boolean below
plane1 = Plane()
plane1.Origin = [-gextent,-gextent,gdepth]
plane1.Point1 = [gextent,-gextent,gdepth]
plane1.Point2 = [-gextent,gextent,gdepth]
plane1Display = Show(plane1, renderView1)
plane1Display.ColorArrayName = [None, '']
plane1Display.DiffuseColor = [0.6666666666666666, 0.6666666666666666, 0.4980392156862745]
plane1Display.EdgeColor = [0.5000076295109483, 0.36787975890745406, 0.12631418326085297]
RenameSource('ground', plane1)
# bodies
filename = dir + os.sep + 'bodies.txt'
f = open(filename,'r')
# first child block is the waves; each body then occupies two block indices
bodies = model.GetDataInformation().GetCompositeDataInformation().GetNumberOfChildren() - 1
if MoorDyn:
    bodies = bodies-1
for i in range(bodies):
    SetActiveSource(model)
    extractBlock1_2 = ExtractBlock(Input=model)
    extractBlock1_2.BlockIndices = [2+(i*2)+1, 2+(i*2)+2]
    extractBlock1_2Display = Show(extractBlock1_2, renderView1)
    extractBlock1_2Display.ColorArrayName = [None, '']
    # bodies.txt: 4 lines per body — name, RGB color, opacity, separator
    name = f.readline()[:-1]
    color = np.fromstring(f.readline()[:-1],sep=' ')
    opacity = np.fromstring(f.readline()[:-1],sep=' ')
    RenameSource('body_'+str(i+1)+': '+name , extractBlock1_2)
    extractBlock1_2Display.DiffuseColor = color
    extractBlock1_2Display.Opacity = opacity
    f.readline()
f.close()
# mooring
if MoorDyn:
    SetActiveSource(model)
    extractBlock1_3 = ExtractBlock(Input=model)
    extractBlock1_3.BlockIndices = [2+((bodies-1)*2)+1+2, 2+((bodies-1)*2)+2+2]
    extractBlock1_3Display = Show(extractBlock1_3, renderView1)
    extractBlock1_3Display.ColorArrayName = [None, '']
    RenameSource('MoorDyn', extractBlock1_3)
    extractBlock1_3Display.DiffuseColor = [0.0, 0.0, 0.0]
    extractBlock1_3Display.Opacity = 1.0
| ratanakso/WEC-Sim | source/paraview_macros/WEC-Sim.py | Python | apache-2.0 | 2,530 | [
"ParaView"
] | 2c97c546001aa4726f9a440b43f929a39d0d786fb2809bcfa63ecc67ba0f2977 |
# encoding: utf-8
"""
Class for reading data from NeuroExplorer (.nex)
Documentation for dev :
http://www.neuroexplorer.com/code.html
Depend on: scipy
Supported : Read
Author: sgarcia,luc estebanez
"""
from .baseio import BaseIO
from ..core import *
from .tools import create_many_to_one_relationship
import numpy as np
import quantities as pq
import struct
import datetime
import os
class NeuroExplorerIO(BaseIO):
    """
    Class for reading nex file.

    Usage:
        >>> from neo import io
        >>> r = io.NeuroExplorerIO(filename='File_neuroexplorer_1.nex')
        >>> seg = r.read_segment(lazy=False, cascade=True)
        >>> print seg.analogsignals   # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        [<AnalogSignal(array([ 39.0625    ,   0.        ,   0.        , ..., -26.85546875, ...
        >>> print seg.spiketrains     # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        [<SpikeTrain(array([  2.29499992e-02,   6.79249987e-02,   1.13399997e-01, ...
        >>> print seg.eventarrays     # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        [<EventArray: @21.1967754364 s, @21.2993755341 s, @21.350725174 s, @21.5048999786 s, ...
        >>> print seg.epocharrays     # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        [<neo.core.epocharray.EpochArray object at 0x10561ba90>, <neo.core.epocharray.EpochArray object at 0x10561bad0>]
    """

    # neo BaseIO capability declarations
    is_readable = True
    is_writable = False
    supported_objects = [Segment , AnalogSignal, SpikeTrain, EventArray, EpochArray]
    readable_objects = [ Segment]
    writeable_objects = []
    has_header = False
    is_streameable = False
    # This is for GUI stuf : a definition for parameters when reading.
    read_params = {
        Segment : [ ]
    }
    write_params = None
    name = 'NeuroExplorer'
    extensions = [ 'nex' ]
    mode = 'file'

    def __init__(self , filename = None) :
        """
        This class read a nex file.

        Arguments:
            filename : the filename to read you can pu what ever it do not read anythings
        """
        BaseIO.__init__(self)
        self.filename = filename

    def read_segment(self,
                     lazy = False,
                     cascade = True,
                     ):
        # read_segment returns one neo Segment holding every variable found
        # in the file; `lazy` keeps only shapes, `cascade=False` returns the
        # bare Segment without children.
        fid = open(self.filename, 'rb')
        globalHeader = HeaderReader(fid , GlobalHeader ).read_f(offset = 0)
        #~ print globalHeader
        #~ print 'version' , globalHeader['version']
        seg = Segment()
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(neuroexplorer_version = globalHeader['version'])
        seg.annotate(comment = globalHeader['comment'])

        if not cascade :
            return seg

        # entity headers are packed after the 544-byte global header,
        # 208 bytes each; all timestamps are integer ticks divided by the
        # global 'freq' to get seconds
        offset = 544
        for i in range(globalHeader['nvar']):
            entityHeader = HeaderReader(fid , EntityHeader ).read_f(offset = offset+i*208)
            entityHeader['name'] = entityHeader['name'].replace('\x00','')

            #print 'i',i, entityHeader['type']

            if entityHeader['type'] == 0:
                # type 0: neuron spike timestamps
                if lazy:
                    spike_times = [ ]*pq.s
                else:
                    spike_times= np.memmap(self.filename , np.dtype('i4') ,'r' ,
                                           shape = (entityHeader['n'] ),
                                           offset = entityHeader['offset'],
                                           )
                    spike_times = spike_times.astype('f8')/globalHeader['freq']*pq.s
                sptr = SpikeTrain( times= spike_times,
                                   t_start = globalHeader['tbeg']/globalHeader['freq']*pq.s,
                                   t_stop = globalHeader['tend']/globalHeader['freq']*pq.s,
                                   name = entityHeader['name'],
                                   )
                if lazy:
                    sptr.lazy_shape = entityHeader['n']
                sptr.annotate(channel_index = entityHeader['WireNumber'])
                seg.spiketrains.append(sptr)

            if entityHeader['type'] == 1:
                # type 1: event timestamps (no labels in the file)
                if lazy:
                    event_times = [ ]*pq.s
                else:
                    event_times= np.memmap(self.filename , np.dtype('i4') ,'r' ,
                                           shape = (entityHeader['n'] ),
                                           offset = entityHeader['offset'],
                                           )
                    event_times = event_times.astype('f8')/globalHeader['freq'] * pq.s
                labels = np.array(['']*event_times.size, dtype = 'S')
                evar = EventArray(times = event_times, labels=labels, channel_name = entityHeader['name'] )
                if lazy:
                    evar.lazy_shape = entityHeader['n']
                seg.eventarrays.append(evar)

            if entityHeader['type'] == 2:
                # type 2: intervals — n start ticks followed by n stop ticks
                if lazy:
                    start_times = [ ]*pq.s
                    stop_times = [ ]*pq.s
                else:
                    start_times= np.memmap(self.filename , np.dtype('i4') ,'r' ,
                                           shape = (entityHeader['n'] ),
                                           offset = entityHeader['offset'],
                                           )
                    start_times = start_times.astype('f8')/globalHeader['freq']*pq.s
                    stop_times= np.memmap(self.filename , np.dtype('i4') ,'r' ,
                                          shape = (entityHeader['n'] ),
                                          offset = entityHeader['offset']+entityHeader['n']*4,
                                          )
                    stop_times = stop_times.astype('f')/globalHeader['freq']*pq.s
                epar = EpochArray(times = start_times,
                                  durations =  stop_times - start_times,
                                  labels = np.array(['']*start_times.size, dtype = 'S'),
                                  channel_name = entityHeader['name'])
                if lazy:
                    epar.lazy_shape = entityHeader['n']
                seg.epocharrays.append(epar)

            if entityHeader['type'] == 3:
                # type 3: spike train with per-spike waveforms; waveform
                # samples (i2) follow the n timestamp ints
                if lazy:
                    spike_times = [ ]*pq.s
                    waveforms = None
                else:
                    spike_times= np.memmap(self.filename , np.dtype('i4') ,'r' ,
                                           shape = (entityHeader['n'] ),
                                           offset = entityHeader['offset'],
                                           )
                    spike_times = spike_times.astype('f8')/globalHeader['freq'] * pq.s
                    waveforms = np.memmap(self.filename , np.dtype('i2') ,'r' ,
                                          shape = (entityHeader['n'] , 1,entityHeader['NPointsWave']),
                                          offset = entityHeader['offset']+entityHeader['n'] *4,
                                          )
                    # raw ADC counts -> millivolts via per-entity gain/offset
                    waveforms = (waveforms.astype('f')* entityHeader['ADtoMV'] + entityHeader['MVOffset'])*pq.mV
                sptr = SpikeTrain( times = spike_times,
                                   t_start = globalHeader['tbeg']/globalHeader['freq']*pq.s,
                                   t_stop = globalHeader['tend']/globalHeader['freq']*pq.s,
                                   name = entityHeader['name'],
                                   waveforms = waveforms,
                                   sampling_rate = entityHeader['WFrequency']*pq.Hz,
                                   left_sweep = 0*pq.ms,
                                   )
                if lazy:
                    sptr.lazy_shape = entityHeader['n']
                sptr.annotate(channel_index = entityHeader['WireNumber'])
                seg.spiketrains.append(sptr)

            if entityHeader['type'] == 4:
                # type 4: population vectors — not supported
                pass

            if entityHeader['type'] == 5:
                # type 5: continuous analog signal
                timestamps= np.memmap(self.filename , np.dtype('i4') ,'r' ,
                                      shape = (entityHeader['n'] ),
                                      offset = entityHeader['offset'],
                                      )
                timestamps = timestamps.astype('f8')/globalHeader['freq']
                # NOTE(review): this memmap re-reads the very same region as
                # `timestamps` (identical offset and shape); fragment start
                # indices would normally be stored after the timestamp block
                # — confirm against the NEX format specification
                fragmentStarts = np.memmap(self.filename , np.dtype('i4') ,'r' ,
                                           shape = (entityHeader['n'] ),
                                           offset = entityHeader['offset'],
                                           )
                fragmentStarts = fragmentStarts.astype('f8')/globalHeader['freq']
                t_start = timestamps[0] - fragmentStarts[0]/float(entityHeader['WFrequency'])
                del timestamps, fragmentStarts
                if lazy :
                    signal = [ ]*pq.mV
                else:
                    # NOTE(review): the sample offset also points at the start
                    # of the entity data, not past the timestamp/fragment
                    # arrays — confirm against the NEX format specification
                    signal = np.memmap(self.filename , np.dtype('i2') ,'r' ,
                                       shape = (entityHeader['NPointsWave'] ),
                                       offset = entityHeader['offset'],
                                       )
                    signal = signal.astype('f')
                    signal *= entityHeader['ADtoMV']
                    signal += entityHeader['MVOffset']
                    signal = signal*pq.mV
                anaSig = AnalogSignal(signal = signal , t_start =t_start*pq.s , sampling_rate = entityHeader['WFrequency']*pq.Hz, name = entityHeader['name'])
                if lazy:
                    anaSig.lazy_shape = entityHeader['NPointsWave']
                anaSig.annotate(channel_index = entityHeader['WireNumber'])
                seg.analogsignals.append( anaSig )

            if entityHeader['type'] == 6:
                # type 6: markers — timestamps, a 64-byte marker-type string,
                # then fixed-width label strings
                if lazy:
                    times = [ ]*pq.s
                    labels = np.array([ ], dtype = 'S')
                    markertype = None
                else:
                    times= np.memmap(self.filename , np.dtype('i4') ,'r' ,
                                     shape = (entityHeader['n'] ),
                                     offset = entityHeader['offset'],
                                     )
                    times = times.astype('f8')/globalHeader['freq'] * pq.s
                    fid.seek(entityHeader['offset'] + entityHeader['n']*4)
                    markertype = fid.read(64).replace('\x00','')
                    labels = np.memmap(self.filename, np.dtype('S' + str(entityHeader['MarkerLength'])) ,'r',
                                       shape = (entityHeader['n'] ),
                                       offset = entityHeader['offset'] + entityHeader['n']*4 + 64
                                       )
                ea = EventArray( times = times,
                                 labels = labels.view(np.ndarray),
                                 name = entityHeader['name'],
                                 channel_index = entityHeader['WireNumber'],
                                 marker_type = markertype
                                 )
                if lazy:
                    ea.lazy_shape = entityHeader['n']
                seg.eventarrays.append(ea)

        create_many_to_one_relationship(seg)
        return seg
# struct layout of the NEX file's global header (read at offset 0) as
# (field name, struct format code) pairs consumed by HeaderReader
GlobalHeader = [
    ('signature' , '4s'),
    ('version','i'),
    ('comment','256s'),
    ('freq','d'),
    ('tbeg','i'),
    ('tend','i'),
    ('nvar','i'),
    ]

# layout of one 208-byte per-variable entity header (packed after the
# global header, one per 'nvar')
EntityHeader = [
    ('type' , 'i'),
    ('varVersion','i'),
    ('name','64s'),
    ('offset','i'),
    ('n','i'),
    ('WireNumber','i'),
    ('UnitNumber','i'),
    ('Gain','i'),
    ('Filter','i'),
    ('XPos','d'),
    ('YPos','d'),
    ('WFrequency','d'),
    ('ADtoMV','d'),
    ('NPointsWave','i'),
    ('NMarkers','i'),
    ('MarkerLength','i'),
    ('MVOffset','d'),
    ('dummy','60s'),
    ]

# marker-header layout; a prefix of EntityHeader (not referenced by the
# code visible in this module)
MarkerHeader = [
    ('type' , 'i'),
    ('varVersion','i'),
    ('name','64s'),
    ('offset','i'),
    ('n','i'),
    ('WireNumber','i'),
    ('UnitNumber','i'),
    ('Gain','i'),
    ('Filter','i'),
    ]
class HeaderReader():
    """Unpack a binary header, described as (name, struct-format) pairs,
    from an open binary file object."""

    def __init__(self,fid ,description ):
        self.fid = fid
        self.description = description

    def read_f(self, offset =0):
        """Seek to *offset* and return the header fields as a dict.

        Single-value formats yield the bare value; multi-value formats
        yield a list.
        """
        self.fid.seek(offset)
        fields = { }
        for name, fmt in self.description :
            raw = self.fid.read(struct.calcsize(fmt))
            values = struct.unpack(fmt , raw)
            fields[name] = values[0] if len(values) == 1 else list(values)
        return fields
| tkf/neo | neo/io/neuroexplorerio.py | Python | bsd-3-clause | 13,880 | [
"NEURON"
] | c0bbead020ed904e0b629abfd3a52fcc1f56ae44bd15eca942c3396ac8435fd5 |
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 16.3.8
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'}
DOCUMENTATION = '''
---
module: avi_networkprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of NetworkProfile Avi RESTful Object
description:
- This module is used to configure NetworkProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
description:
description:
- User defined description for the object.
name:
description:
- The name of the network profile.
required: true
profile:
description:
- Networkprofileunion settings for networkprofile.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the network profile.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a network profile for an UDP application
avi_networkprofile:
controller: ''
username: ''
password: ''
name: System-UDP-Fast-Path
profile:
type: PROTOCOL_TYPE_UDP_FAST_PATH
udp_fast_path_profile:
per_pkt_loadbalance: false
session_idle_timeout: 10
snat: true
tenant_ref: admin
'''
RETURN = '''
obj:
description: NetworkProfile (api/networkprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
    # the avi helpers require the optional avisdk package; HAS_AVI lets
    # main() fail with a friendly message instead of a raw ImportError
    from ansible.module_utils.avi import (
        avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
    HAS_AVI = False
def main():
    """Entry point: declare the networkprofile module arguments and hand
    control to the generic Avi ansible API helper."""
    argument_specs = {
        'state': dict(default='present', choices=['absent', 'present']),
        'description': dict(type='str'),
        'name': dict(type='str', required=True),
        'profile': dict(type='dict', required=True),
        'tenant_ref': dict(type='str'),
        'url': dict(type='str'),
        'uuid': dict(type='str'),
    }
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    if not HAS_AVI:
        # the optional avisdk dependency is missing; fail with guidance
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'networkprofile',
                           set([]))


if __name__ == '__main__':
    main()
"VisIt"
] | fe97524a706bdce03a4766200b4c827294dbda802b45098582951ec81c41b821 |
import glob
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
import os

os.chdir('/Users/evanbiederstedt/Downloads/RRBS_data_files')

# Annotation columns dropped immediately after loading (unused by this analysis).
_DROP_COLS = ['chr', 'start', 'strand', 'avgWeightedEnt', 'CpGEntropy', 'tss',
              'genes', 'exons', 'introns', 'promoter', 'cgi', 'geneDensity',
              'ctcf', 'ctcfUpstream', 'ctcfDownstream', 'ctcfDensity',
              'geneDistalRegulatoryModules', 'vistaEnhancers', '3PrimeUTR',
              '5PrimeUTR', 'firstExon', 'geneDistalRegulatoryModulesK562',
              'hypoInHues64']

# Genomic-feature distance columns; a read stack overlaps a feature when the
# signed distance is negative.
_FEATURES = ['tssDistance', 'genesDistance', 'exonsDistance', 'intronsDistance',
             'promoterDistance', 'cgiDistance', 'ctcfDistance', 'ctcfUpDistance',
             'ctcfDownDistance', 'geneDistalRegulatoryModulesDistance',
             'vistaEnhancersDistance', '3PrimeUTRDistance', '5PrimeUTRDistance',
             'firstExonDistance', 'geneDistalRegulatoryModulesK562Distance',
             'hypoInHues64Distance']

# Output categories, in the column order of the final CSV.
_CATEGORIES = _FEATURES + ['intergenic', 'shore', 'shelf']

_READ_COLS = ["methReadCount", "unmethReadCount", "mixedReadCount"]

rows = []
for filename in glob.glob("RRBS_NormalBCD19pCD27mcell*"):
    df = pd.read_table(filename)
    df = df.drop(_DROP_COLS, axis=1)

    # Bin reads by CpG count and label each run of equal bins as one stack.
    # NOTE(fix): pd.cut expects an integer bin count; np.ceil returns a float.
    num_bins = int(np.ceil(df['avgReadCpGs'].max() / 1.25))
    df['avgReadCpGs_binned'] = pd.cut(df['avgReadCpGs'], num_bins, labels=False)
    df['read_stack_ID'] = ((df.avgReadCpGs_binned.shift(1)
                            != df.avgReadCpGs_binned).astype(int).cumsum())
    df['total_reads'] = df[_READ_COLS].sum(axis=1)

    # One boolean mask per category.  Feature overlap = negative distance.
    masks = {feat: df[feat] < 0 for feat in _FEATURES}
    # Intergenic: strictly outside every annotated feature except the TSS
    # (the original intergenic test did not include tssDistance).
    masks['intergenic'] = (
        df[[f for f in _FEATURES if f != 'tssDistance']] > 0).all(axis=1)
    # CpG-island shore (0 < d < 2000) and shelf (2000 <= d < 4000).
    masks['shore'] = df['cgiDistance'].between(0, 2000, inclusive=False)
    masks['shelf'] = (df['cgiDistance'] >= 2000) & (df['cgiDistance'] < 4000)

    # Per-category masked copies of the count columns.
    for cat, mask in masks.items():
        df['totreads_' + cat] = df['total_reads'].where(mask, 0)
        df['mixedReads_' + cat] = np.where(mask, df['mixedReadCount'], 0)
        df['thisMeth_' + cat] = np.where(mask, df['thisMeth'], 0)
        df['thisUnmeth_' + cat] = np.where(mask, df['thisUnmeth'], 0)

    # The original grouped by read_stack_ID and then summed the group sums;
    # that is identical to summing the whole file directly.
    sum_cols = [pfx + cat for cat in _CATEGORIES
                for pfx in ('totreads_', 'mixedReads_',
                            'thisMeth_', 'thisUnmeth_')]
    totals = df[sum_cols].sum()

    # Per-file methylation fraction and PDR (proportion of discordant reads)
    # for every category.  Zero denominators yield 0, which is what the
    # original try/except ZeroDivisionError blocks intended but never did
    # (numpy/pandas division does not raise ZeroDivisionError).
    row = {'filename': str(filename)}
    for cat in _CATEGORIES:
        denom = totals['thisMeth_' + cat] + totals['thisUnmeth_' + cat]
        row['methylation_' + cat] = (
            totals['thisMeth_' + cat] / denom) if denom else 0
        # FIX: the original PDR_tssDistance used mixedReads_genesDistance.
        covered = totals['totreads_' + cat]
        row['PDR_' + cat] = (
            totals['mixedReads_' + cat] / covered) if covered else 0
    rows.append(row)

# Assemble one row per input file, columns in the original output order.
out_cols = (['filename']
            + ['methylation_' + cat for cat in _CATEGORIES]
            + ['PDR_' + cat for cat in _CATEGORIES])
newdf1 = pd.DataFrame(rows, columns=out_cols)
newdf1.to_csv("mcell_regions.csv", index=False)
| evanbiederstedt/RRBSfun | scripts/mcell_regions.py | Python | mit | 25,304 | [
"MCell"
] | 97f029bb036b688859fc5264b387c116a064b0044f71f7155bdaf1c3fd575ca4 |
'''
Identify people and umbrellas moving down the street - using top down static video.
Bare bones version of the program created by - https://github.com/jlark/rainAlert
track and detect umbrellas (to detect rainy days). The original version included
a significantly more advanced training model along with hooks to make twitter alerts.
That has all been removed from this version, I just wanted to created a simplified
version to serve as an example for myself (still learning OpenCV) and others. I have
removed everything which did not clearly serve my goals -
Identify moving objects.
Create a simple program to draw bounding boxes around moving objects.
Get basic experience working with some of the OpenCV tools.
I wanted to provide a working starter project - since I was unable to find
anything of moderate complexity to start from (when I started working on
OpenCV 10/26/2013).
Again - all credit goes to the original author https://github.com/jlark/rainAlert
which is the starting point I used to clean up and comment. I have added references
to OpenCV documentation or stack overflow questions for sections of the code
I did not understand when doing my initial audit.
Thanks - Anthony Honstain
'''
import numpy as np
import cv2
import cv2.cv as cv
class BasicTracker:
    """Track moving objects in a static top-down video.

    Pipeline per frame: grayscale -> blur/equalize -> MOG background
    subtraction -> erode -> contour detection -> size filter -> draw boxes.
    """

    def __init__(self):
        # Number of erosion passes applied to the foreground mask.
        self.erode_iteration = 1
        # Bounding-box area limits (pixels^2): keep sizeM < area < sizeL.
        # what size to limit our bounding boxes to
        self.sizeL = 4000
        self.sizeM = 1500
        # kernel size (height x width) for erode and dilate
        self.kernalH = 3
        self.kernalW = 3
        self.kernel = np.ones((self.kernalH,self.kernalW),'uint8')
        # Most recently detected contours (shared between frames so boxes can
        # still be drawn before tracking starts).
        self.contours = []
        # tracking will start after this many frames (lets the background
        # model settle first)
        self.start_finding_contours = 10
        '''
        Use MOG BG extractor - this will 'learn' over time, so we create a single instance
        and reuse it.
        WARNING - if camera moves this needs to be reset.
        Description on BackgroundSubtractorMOG
        http://stackoverflow.com/questions/10458633/how-to-use-cvbackgroundsubtractormog-in-opencv
        http://docs.opencv.org/modules/video/doc/motion_analysis_and_object_tracking.html?highlight=backgroundsubtractormog#backgroundsubtractormog-backgroundsubtractormog
        http://docs.opencv.org/java/org/opencv/video/BackgroundSubtractorMOG.html
        '''
        # Args: history=24*60 frames, 1 gaussian mixture, background ratio
        # 0.8, noise sigma 0.5 (legacy OpenCV 2.x API).
        self.background_subtractor = cv2.BackgroundSubtractorMOG(24*60, 1, 0.8, 0.5)

    def find_contours(self,frame):
        '''
        Find the contours in the image.
        frame - image should be black and white (a binary foreground mask);
        NOTE: cv2.findContours modifies its input in OpenCV 2.x.
        '''
        conts, hier = cv2.findContours(frame, cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
        # Note - may need to check size of contours (want to ignore ones that are too small or large)
        return conts

    def draw_contours(self, image, contours, sizeL, sizeM, color):
        # Draw an axis-aligned bounding box around every contour.
        # NOTE(review): sizeL/sizeM are accepted but unused here; size
        # filtering happens in contour_size_filter before this is called.
        for cnt in contours:
            x,y,w,h = cv2.boundingRect(cnt)
            cv2.rectangle(image,(x,y),(x+w,y+h), color,2)
        return image

    def contour_size_filter(self, contours, sizeL, sizeM):
        '''
        Keep only contours whose bounding-box area lies strictly
        between sizeM and sizeL.
        '''
        filtered = []
        for cnt in contours:
            x,y,w,h = cv2.boundingRect(cnt)
            area = float(w)*float(h)
            if area < sizeL and area > sizeM:
                filtered.append(cnt)
        return filtered

    def make_black_white(self,f):
        """
        Convert a BGR/RGB frame to single-channel grayscale.

        Overview of cvtColor - Converts an image from one color space to another.
        Python: cv2.cvtColor(src, code[, dst[, dstCn]]) -> dst
        """
        # http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html#cvtcolor
        bwImg = cv2.cvtColor(f,cv.CV_RGB2GRAY)
        return bwImg

    def background_smoothing(self, frame_black_white):
        """
        Basic background smoothing to generate a foreground mask.
        Apply gaussian blur, equalizeHist, and then BackgroundSubtractorMOG.
        NOTE - if camera moves, the background subtractor needs to be reset.
        """
        # Gaussian kernel size (1x1 is effectively a no-op blur).
        blur_kp = 1
        frame_black_white = cv2.GaussianBlur(frame_black_white, (blur_kp, blur_kp), 1)
        # Histogram equalization to normalize lighting:
        # http://opencvpython.blogspot.com/2013/03/histograms-2-histogram-equalization.html
        frame_black_white = cv2.equalizeHist(frame_black_white)
        foreground_mask = self.background_subtractor.apply(frame_black_white)
        return foreground_mask

    def track(self, video_capture):
        """
        Primary tracking loop - read the video input frame by frame,
        detect moving objects, and display them with bounding boxes.
        Exits at end of video or when ESC is pressed.
        """
        contour_counter = 0
        while 1:
            _, frame_raw = video_capture.read()
            # Check to see if we have a valid frame so we
            # don't get a strange error from opencv.
            # http://stackoverflow.com/questions/16703345/how-can-i-use-opencv-python-to-read-a-video-file-without-looping-mac-os
            if (type(frame_raw) == type(None)):
                print "End of video - exiting"
                break
            # Use b/w for the rest of our processing.
            frame_black_white = self.make_black_white(frame_raw)
            # Background removal
            foreground_mask = self.background_smoothing(frame_black_white)
            # Erode the mask to remove small speckle noise.
            eroded_frame = cv2.erode(foreground_mask, self.kernel, iterations=self.erode_iteration)
            # Only start detecting once the background model has had a few
            # frames to settle; self.contours keeps the last result.
            if contour_counter > self.start_finding_contours:
                self.contours = self.find_contours(eroded_frame)
                self.contours = self.contour_size_filter(self.contours, self.sizeL, self.sizeM)
            # Convert back to BGR so the boxes can be drawn in color.
            frame_color = cv2.cvtColor(frame_black_white, cv2.COLOR_GRAY2BGR)
            frame_color_contoured = self.draw_contours(frame_color,
                                                       self.contours,
                                                       self.sizeL,
                                                       self.sizeM,
                                                       (255,255,0))
            contour_counter = contour_counter + 1
            cv2.imshow('Color Image with Contours', frame_color_contoured)
            #break if esc is hit
            k = cv2.waitKey(20)
            if k == 27:
                break
def main_loop():
    """Open the sample top-down street video and run the tracker over it."""
    capture = cv2.VideoCapture("./street_view_topdown.mp4")
    BasicTracker().track(capture)
if __name__ == '__main__':
print "Start application - begin primary loop to process video."
main_loop()
| AnthonyHonstain/SimpleRainAlert | basic_tracker.py | Python | mit | 6,890 | [
"Gaussian"
] | de9db2e6fece4c62e3e20615b359150c14f97fca1d44d0264291d57ca72cbec5 |
from pathlib import Path
from clldutils.sfm import SFM, Entry
def test_Dictionary(tmp_path):
    """Round-trip an SFM dictionary through a file and exercise visit()."""
    fixture = Path(__file__).parent.joinpath('fixtures', 'test.sfm')

    # keep_empty controls whether empty markers survive parsing.
    sfm = SFM.from_file(fixture, keep_empty=True)
    assert sfm[1].get('empty') is not None
    sfm = SFM.from_file(fixture)
    assert len(sfm) == 2
    assert sfm[1].get('empty') is None

    # Write out and read back; marker values must survive the round trip.
    target = tmp_path / 'test'
    sfm.write(target)
    reread = SFM()
    reread.read(target)
    assert sfm[0].get('marker') == reread[0].get('marker')
    assert sfm[1].get('marker') == reread[1].get('marker')

    # visit() applies the callable to every entry in place.
    assert sfm[0].get('key') is None
    sfm.visit(lambda e: e.append(('key', 'value')))
    assert sfm[0].get('key') == 'value'
def test_Entry():
    """Parse single- and multi-marker records, defaults, and empty markers."""
    # Multi-marker record; the first \marker value spans two lines.
    entry = Entry.from_string('\\lx1 lexeme\n\\marker äöü\nabc\n\\marker next val')
    assert entry.get('lx1') == 'lexeme'
    assert entry.get('marker') == 'äöü\nabc'
    assert entry.getall('marker')[1] == 'next val'
    assert entry.markers()['marker'] == 2
    # Missing markers fall back to the supplied default.
    assert entry.get('x', 5) == 5

    # Empty markers are dropped unless keep_empty is requested.
    assert Entry.from_string('\\empty\n').get('empty') is None
    assert Entry.from_string('\\empty\n', keep_empty=True).get('empty') is not None
def test_Entry_with_numeric_marker():
    """Marker names containing digits and underscores parse correctly."""
    parsed = Entry.from_string('\\z10_Eng abc')
    assert parsed.get('z10_Eng') == 'abc'
| clld/clldutils | tests/test_sfm.py | Python | apache-2.0 | 1,223 | [
"VisIt"
] | 52b20fce38a9c656f407af006115f688e7848911a7870f9d834740922acc0660 |
#!/usr/bin/env python
"""
Simple script for testing various SfePy functionality, examples not
covered by tests, and running the tests.
The script just runs the commands specified in its main() using the
`subprocess` module, captures the output and compares one or more key
words to the expected ones.
The output of failed commands is saved to 'test_install.log' file.
"""
from __future__ import print_function
from __future__ import absolute_import
import time
import sys
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import shlex
import subprocess
import logging
import re
DEBUG_FMT = '*' * 55 + '\n%s\n' + '*' * 55
def _get_logger(filename='test_install.log'):
    """
    Set up and return the script logger: INFO messages go to the console,
    DEBUG (and above) messages go to *filename*.
    """
    log = logging.getLogger('test_install.py')
    log.setLevel(logging.DEBUG)

    # Attach the console handler first, then the file handler, each with
    # its own threshold.
    for handler, level in ((logging.StreamHandler(), logging.INFO),
                           (logging.FileHandler(filename), logging.DEBUG)):
        handler.setLevel(level)
        log.addHandler(handler)

    return log
logger = _get_logger()
def check_output(cmd):
    """
    Run the specified command and capture its outputs.

    Returns
    -------
    out : list
        The [stdout, stderr] output pair, decoded to text.
    """
    logger.info(cmd)
    proc = subprocess.Popen(shlex.split(cmd),
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    return [stdout.decode(), stderr.decode()]
def report(out, name, line, item, value, eps=None, return_item=False,
           match_numbers=False):
    """
    Check that `item` at `line` of the output string `out` is equal
    to `value`. If not, print the output.

    When `eps` is given, compare as floats with tolerance `eps`; otherwise
    compare as strings.  With `match_numbers=True`, `item` indexes the
    numbers found on the line instead of its whitespace-split tokens.
    When `return_item=True`, return ``(ok, compared_item)`` where
    compared_item is None if it could not be extracted.
    """
    # The token actually compared; kept so the return_item path below cannot
    # raise (the original re-indexed `status[item]`, which NameError'd when
    # the output was short and returned a single character with
    # match_numbers=True).
    status_item = None
    try:
        if match_numbers:
            status = out.split('\n')[line]
        else:
            status = out.split('\n')[line].split()
    except IndexError:
        logger.error(' not enough output from command!')
        ok = False
    else:
        try:
            if match_numbers:
                # Raw string: '\d' is an invalid escape in a plain literal.
                pat = r'([-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?[jJ]?)'
                matches = re.findall(pat, status)
                status_item = matches[item]
            else:
                status_item = status[item]
            logger.info(' comparing: %s %s', status_item, value)
            if eps is None:
                ok = (status_item == value)
            else:
                try:
                    # float() raises ValueError/TypeError on bad input; the
                    # original bare `except:` also swallowed KeyboardInterrupt.
                    ok = abs(float(status_item) - float(value)) < eps
                except (TypeError, ValueError):
                    ok = False
        except IndexError:
            ok = False
    logger.info(' %s: %s', name, ok)
    if not ok:
        logger.debug(DEBUG_FMT, out)
    if return_item:
        return ok, status_item
    else:
        return ok
def report2(out, name, items, return_item=False):
    """
    Verify that each of `items` occurs as a substring of `out`; stop at
    the first miss. With `return_item`, also return the last item
    checked.
    """
    ok = True
    for wanted in items:
        logger.info(' checking: %s', wanted)
        if wanted not in out:
            ok = False
            break
    logger.info(' %s: %s', name, ok)
    if not ok:
        logger.debug(DEBUG_FMT, out)
    return (ok, wanted) if return_item else ok
def report_tests(out, return_item=False):
    """
    Check that all tests in the output string `out` passed.
    If not, print the output.

    With `return_item`, also return the failure count as a string
    ('-1' when the summary line could not be found).
    """
    # Raw strings: the original literal relied on '\(' surviving as a
    # two-character sequence, which is a SyntaxWarning in modern Python.
    search = re.compile(r'([0-9]+) test file\(s\) executed in ([0-9.]+) s,'
                        r' ([0-9]+) failure\(s\) of ([0-9]+) test\(s\)').search
    try:
        stats = search(out).groups()
    except AttributeError:
        # Summary line missing: fabricate stats that report failure.
        stats = '0', '0', '-1', '0'
    ok = stats[2] == '0'
    logger.info(' %s test file(s) executed in %s s, %s failure(s) of %s test(s)'
                % (stats[0], stats[1], stats[2], stats[3]))
    if not ok:
        logger.debug(DEBUG_FMT, out)
    if return_item:
        return ok, stats[2]
    else:
        return ok
def main():
    """Run every example command, then the test suite, and append the
    pass counts and timings to 'test_install_times.log'."""
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.parse_args()
    # Truncate the log file left over from any previous run.
    fd = open('test_install.log', 'w')
    fd.close()
    # Run the child scripts with the same major Python version as ours.
    if sys.version_info[0] < 3:
        cmd = 'python2'
    else:
        cmd = 'python3'
    # eok counts the example checks that passed (report() returns a bool).
    eok = 0
    t0 = time.time()
    # --- mesh-generation and mesh-manipulation scripts ---
    out, err = check_output('%s ./script/blockgen.py' % cmd)
    eok += report(out, '...', -2, 1, '...done')
    out, err = check_output('%s ./script/cylindergen.py' % cmd)
    eok += report(out, '...', -2, 1, '...done')
    out, err = check_output('%s ./script/convert_mesh.py meshes/3d/cylinder.vtk out.mesh' % cmd)
    eok += report(out, '...', -2, 1, '...done')
    out, err = check_output('%s ./script/tile_periodic_mesh.py -r 2,2 meshes/elements/2_4_2.mesh out-per.mesh' % cmd)
    eok += report(out, '...', -2, 1, 'done.')
    out, err = check_output('%s ./script/extract_surface.py meshes/various_formats/octahedron.node -' % cmd)
    eok += report(out, '...', -2, 0, '1185')
    # --- solver examples; expected residuals checked within eps ---
    out, err = check_output('%s ./simple.py examples/diffusion/poisson.py' % cmd)
    eok += report(out, '...', -3, 5, '1.173819e-16', eps=1e-15)
    out, err = check_output("""%s ./simple.py -c "ebc_2 : {'name' : 't2', 'region' : 'Gamma_Right', 'dofs' : {'t.0' : -5.0}}" examples/diffusion/poisson.py""" %cmd)
    eok += report(out, '...', -3, 5, '2.308051e-16', eps=1e-15)
    out, err = check_output('%s ./simple.py examples/diffusion/poisson_iga.py' % cmd)
    eok += report(out, '...', -3, 5, '3.373487e-15', eps=1e-14)
    out, err = check_output('%s ./simple.py examples/navier_stokes/stokes.py' % cmd)
    eok += report(out, '...', -3, 5, '1.210678e-13', eps=1e-11)
    out, err = check_output('%s ./simple.py examples/diffusion/poisson_parametric_study.py' % cmd)
    eok += report(out, '...', -3, 5, '1.606408e-14', eps=1e-13)
    out, err = check_output('%s ./simple.py examples/linear_elasticity/its2D_3.py' % cmd)
    eok += report(out, '...', -24, 5, '3.964886e-12', eps=1e-11)
    eok += report(out, '...', -4, 4, '2.58660e+01', eps=1e-5)
    out, err = check_output('%s ./simple.py examples/linear_elasticity/linear_elastic.py --format h5' % cmd)
    eok += report(out, '...', -3, 5, '4.638192e-18', eps=1e-15)
    # --- postprocessing of the HDF5 results produced above ---
    out, err = check_output('%s ./extractor.py -d cylinder.h5' % cmd)
    eok += report(out, '...', -2, 1, '...done')
    out, err = check_output('%s ./postproc.py -n --no-offscreen -o cylinder.png cylinder.h5' % cmd)
    eok += report(out, '...', -3, 2, 'cylinder.png...')
    # --- phononic band-gap examples; compare numbers found on the line ---
    out, err = check_output('%s ./phonon.py examples/phononic/band_gaps.py' % cmd)
    eok += report(out, '...', -9, 0, '2.08545116e+08', match_numbers=True)
    eok += report(out, '...', -8, 1, '1.16309223e+11', match_numbers=True)
    out, err = check_output('%s ./phonon.py examples/phononic/band_gaps.py --phase-velocity' % cmd)
    eok += report(out, '...', -2, 0, '4189.41229592', match_numbers=True)
    eok += report(out, '...', -2, 1, '2620.55608256', match_numbers=True)
    out, err = check_output('%s ./phonon.py examples/phononic/band_gaps.py -d' % cmd)
    eok += report(out, '...', -6, 1, '[0,')
    out, err = check_output('%s ./phonon.py examples/phononic/band_gaps_rigid.py' % cmd)
    eok += report(out, '...', -9, 0, '4.58709531e+07', match_numbers=True)
    eok += report(out, '...', -8, 1, '1.13929200e+11', match_numbers=True)
    out, err = check_output('%s ./simple.py examples/quantum/hydrogen.py' % cmd)
    eok += report(out, '...', -2, -2, '-0.01913506', eps=1e-4)
    # --- homogenization; here we only check for expected substrings ---
    out, err = check_output('%s ./homogen.py examples/homogenization/perfusion_micro.py' % cmd)
    eok += report2(out, '...', ['computing EpA', 'computing PA_3',
                                'computing GA', 'computing EmA',
                                'computing KA'])
    out, err = check_output('%s examples/homogenization/rs_correctors.py -n' % cmd)
    eok += report(out, '...', -2, -1, '1.644e-01', match_numbers=True)
    out, err = check_output('%s examples/large_deformation/compare_elastic_materials.py -n' % cmd)
    eok += report(out, '...', -3, 5, '1.068759e-14', eps=1e-13)
    out, err = check_output('%s examples/linear_elasticity/linear_elastic_interactive.py' % cmd)
    eok += report(out, '...', -16, 0, '1.62128841139e-14', eps=1e-13)
    out, err = check_output('%s examples/linear_elasticity/modal_analysis.py' % cmd)
    eok += report(out, '...', -12, 5, '12142.11470773', eps=1e-13)
    out, err = check_output('%s examples/multi_physics/thermal_electric.py' % cmd)
    eok += report(out, '...', -4, 5, '2.612933e-14', eps=1e-13)
    out, err = check_output('%s examples/diffusion/laplace_refine_interactive.py output' % cmd)
    eok += report(out, '...', -3, 5, '2.675866e-15', eps=1e-13)
    out, err = check_output('%s examples/diffusion/laplace_iga_interactive.py -o output-tests' % cmd)
    eok += report(out, '...', -3, 5, '1.028134e-13', eps=1e-12)
    out, err = check_output('%s examples/dg/imperative_burgers_1D.py -o output-tests' % cmd)
    eok += report(out, '...', -3, 3, 'moment_1D_limiter')
    # --- parallel examples run under mpiexec on 2 processes ---
    out, err = check_output('mpiexec -n 2 %s examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --silent -ksp_monitor' % cmd)
    eok += report(out, '...', -2, 4, '8.021313824020e-07', eps=1e-6)
    out, err = check_output('mpiexec -n 2 %s examples/multi_physics/biot_parallel_interactive.py output-parallel -2 --silent -ksp_monitor' % cmd)
    eok += report(out, '...', -2, 4, '3.787214380277e-09', eps=1e-8)
    t1 = time.time()
    # --- the full test suite ---
    out, err = check_output('%s ./run_tests.py' % cmd)
    tok, failed = report_tests(out, return_item=True)
    tok = {True : 'ok', False : 'fail'}[tok]
    t2 = time.time()
    # Append per-run timing/statistics (examples vs. test suite).
    fd = open('test_install_times.log', 'a+')
    fd.write('%s: examples: %.2f [s] (%d), tests: %.2f [s] (%s: %s)\n'
             % (time.ctime(t0), t1 - t0, eok, t2 - t1, tok, failed))
    fd.close()

if __name__ == '__main__':
    main()
| sfepy/sfepy | test_install.py | Python | bsd-3-clause | 10,002 | [
"VTK"
] | 80483ae5cf8315684836f7eb4911bc3679d7170def27c4ae742e8ebfbbf1f208 |
__author__ = 'robert'
import sys
if (sys.version_info < (2, 7, 0)):
import unittest2 as unittest
else:
import unittest
try:
import brian
import pypet.brian
from pypet.brian import *
except ImportError as exc:
#print('Import Error: %s' % str(exc))
brian = None
from pypet.tests.testutils.ioutils import get_root_logger, parse_args, run_suite
import inspect
@unittest.skipIf(brian is None, 'Can only be run with brian!')
class TestAllBrianImport(unittest.TestCase):
    """Smoke tests for the public API exported by ``pypet.brian``."""

    tags = 'unittest', 'brian', 'import'

    def test_import_star(self):
        """Every name in ``pypet.brian.__all__`` must resolve after a star import."""
        for class_name in pypet.brian.__all__:
            # eval() works because `from pypet.brian import *` above put
            # every exported name into this module's globals.
            # Typo fix: log message read 'Evaluauting'.
            logstr = 'Evaluating %s: %s' % (class_name, repr(eval(class_name)))
            get_root_logger().info(logstr)

    def test_if_all_is_complete(self):
        """Every class/function defined in the package must appear in ``__all__``."""
        for item in pypet.brian.__dict__.values():
            if inspect.isclass(item) or inspect.isfunction(item):
                # assertIn gives a readable failure naming the missing symbol
                # (assertTrue(x in y) only reports 'False is not true').
                self.assertIn(item.__name__, pypet.brian.__all__)
if __name__ == '__main__':
    # Run this module's tests with options parsed from the command line.
    opt_args = parse_args()
    run_suite(**opt_args)
"Brian"
] | 64a528b14bdb9dd790a96d85fdee6b7e428343bd3516e34be37b486c0c2fdfc9 |
from jasp import *
from ase.io import read, write
from ase.io.vasp import read_vasp, write_vasp
'''
code for running NEB calculations in jasp
here is typical code to set up the band:
with jasp('../surfaces/Pt-slab-O-fcc') as calc:
initial_atoms = calc.get_atoms()
with jasp('../surfaces/Pt-slab-O-hcp') as calc:
final_atoms = calc.get_atoms()
images = [initial_atoms]
images += [initial_atoms.copy() for i in range(3)]
images += [final_atoms]
neb = NEB(images)
# Interpolate linearly the positions of the three middle images:
neb.interpolate()
with jasp('O-diffusion',
ibrion=2,
nsw=50,
images=5, # initial + nimages + final
spring=-5,
atoms=images) as calc:
images, energies = calc.get_neb()
The spring tag triggers the setup of an NEB calculation for Jasp.
'''
import logging
# Module-level logger shared by all NEB helpers in this file.
log = logging.getLogger('Jasp')
def get_neb(self, npi=1):
    '''Returns images, energies if available or runs the job.

    npi = nodes per image for running the calculations. Default=1

    Returns (images, energies) where energies is a numpy array with one
    entry per image, endpoints included. May instead raise
    VaspQueued/VaspSubmitted when the job is pending or just submitted.
    '''
    # how do we know if we need to run jobs? if jobid exists that means
    # it is or was queued
    #
    # if no jobid, and no OUTCAR for each image, then calculation
    # required.
    #
    # It is also possible a keyword has changed, and that a calculation
    # is required.
    calc_required = False
    if self.job_in_queue():
        from jasp import VaspQueued
        raise VaspQueued
    else:
        # Job is no longer queued; remove the stale marker file.
        if os.path.exists('jobid'):
            os.unlink('jobid')
    # check for OUTCAR in each image dir
    # NOTE(review): only interior images are checked; the endpoints are
    # assumed to have been computed beforehand.
    for i in range(1, len(self.neb_images)-1):
        wf = '{0}/OUTCAR'.format(str(i).zfill(2))
        if not os.path.exists(wf):
            log.debug('calc_required in {0}'.format(wf))
            calc_required = True
            break
        else:
            # there was an OUTCAR, now we need to check for
            # convergence.
            def subdir_converged(fname):
                # Scan the OUTCAR for VASP's ionic-convergence message.
                f = open(fname)
                for line in f:
                    if 'reached required accuracy - stopping structural energy minimisation' in line:
                        return True
                f.close()
                return False
            converged = subdir_converged('{0}/OUTCAR'.format(str(i).zfill(2)))
            if not converged:
                print '{0} does not appear converged'.format(str(i).zfill(2))
    # make sure no keywords have changed
    if not ((self.float_params == self.old_float_params) and
            (self.exp_params == self.old_exp_params) and
            (self.string_params == self.old_string_params) and
            (self.int_params == self.old_int_params) and
            (self.bool_params == self.old_bool_params) and
            (self.list_params == self.old_list_params) and
            (self.input_params == self.old_input_params) and
            (self.dict_params == self.old_dict_params)):
        calc_required = True
        log.debug('Calculation is required')
    log.debug(self.vaspdir)
    if calc_required:
        '''
        this creates the directories and files if needed.
        write out the poscar for all the images. write out the kpoints and
        potcar.
        '''
        if os.path.exists('jobid'):
            raise VaspQueued
        # write out all the images, including initial and final
        for i, atoms in enumerate(self.neb_images):
            image_dir = str(i).zfill(2)
            if not os.path.isdir(image_dir):
                # create if needed.
                os.makedirs(image_dir)
            # this code is copied from
            # ase.calculators.vasp.initialize to get the sorting
            # correct.
            p = self.input_params
            self.all_symbols = atoms.get_chemical_symbols()
            self.natoms = len(atoms)
            self.spinpol = atoms.get_initial_magnetic_moments().any()
            atomtypes = atoms.get_chemical_symbols()
            # Determine the number of atoms of each atomic species
            # sorted after atomic species
            special_setups = []
            symbols = []
            symbolcount = {}
            if self.input_params['setups']:
                for m in self.input_params['setups']:
                    try:
                        special_setups.append(int(m))
                    except ValueError:
                        continue
            for m, atom in enumerate(atoms):
                symbol = atom.symbol
                if m in special_setups:
                    pass
                else:
                    if symbol not in symbols:
                        symbols.append(symbol)
                        symbolcount[symbol] = 1
                    else:
                        symbolcount[symbol] += 1
            # Build the sorting list
            self.sort = []
            self.sort.extend(special_setups)
            for symbol in symbols:
                for m, atom in enumerate(atoms):
                    if m in special_setups:
                        pass
                    else:
                        if atom.symbol == symbol:
                            self.sort.append(m)
            # resort is the inverse permutation of sort.
            self.resort = range(len(self.sort))
            for n in range(len(self.resort)):
                self.resort[self.sort[n]] = n
            self.atoms_sorted = atoms[self.sort]
            # Check if the necessary POTCAR files exists and
            # create a list of their paths.
            self.symbol_count = []
            for m in special_setups:
                self.symbol_count.append([atomtypes[m], 1])
            for m in symbols:
                self.symbol_count.append([m, symbolcount[m]])
            write_vasp('{0}/POSCAR'.format(image_dir),
                       self.atoms_sorted,
                       symbol_count = self.symbol_count)
            cwd = os.getcwd()
            os.chdir(image_dir)
            self.write_sort_file()
            os.chdir(cwd)
        # Persist the endpoint energies so they can be re-read later
        # without redoing the endpoint calculations.
        f = open('00/energy', 'w')
        f.write(str(self.neb_initial_energy))
        f.close()
        f = open('{0}/energy'.format(str(len(self.neb_images) - 1).zfill(2)), 'w')
        f.write(str(self.neb_final_energy))
        f.close()
        # originally I only created these if they did not exist. that
        # doesn't modfiy the incar if you add variables though. I am
        # too lazy right now to write code that checks if a change was
        # made, and we just write it out here.
        self.write_kpoints()
        self.initialize(self.neb_images[0])
        self.write_potcar()
        self.write_incar(self.neb_images[0])
        # Request one set of processors/nodes per interior image.
        if JASPRC['scheduler'].lower() == 'sge':
            JASPRC['queue.nprocs'] = npi * self.neb_nimages
            log.debug('Running on %i processors',JASPRC['queue.nprocs'])
        elif JASPRC['scheduler'].lower() == 'pbs':
            JASPRC['queue.nodes'] = npi * self.neb_nimages
            log.debug('Running on %i nodes',JASPRC['queue.nodes'])
        self.run() # this will raise VaspSubmitted
    #############################################
    # now we are just retrieving results
    images = [self.neb_images[0]]
    energies = [self.neb_initial_energy] # this is a tricky point. unless
                                         # the calc stores an absolute
                                         # path, it may be tricky to call
                                         # get_potential energy
    log.debug('self.neb_nimages = %i', self.neb_nimages)
    for i in range(1, self.neb_nimages + 1):
        log.debug(self.neb_images[i].numbers)
        nebd = str(i).zfill(2)
        try:
            os.chdir(nebd)
            log.debug('in %s' % nebd)
            log.debug(os.listdir('.'))
            energies += [self.read_energy()[1]]
            atoms = read('CONTCAR')
            # I do not understand why this is needed to resort the
            # atoms! If I don't do it, the calculations are wrong. If
            # I do it here, it is wrong somewhere else.
            f = open('ase-sort.dat')
            sort, resort = [], []
            for line in f:
                s, r = [int(x) for x in line.split()]
                sort.append(s)
                resort.append(r)
            log.debug('read %i: %s', i, str(atoms.numbers))
            log.debug('read %i: %s', i, str(atoms.get_chemical_symbols()))
            images += [atoms[resort]]
        finally:
            os.chdir('..')
    images += [self.neb_images[-1]]
    energies += [self.neb_final_energy]
    return (images, np.array(energies))

# Attach as a method of the Vasp class (monkey patch).
Vasp.get_neb = get_neb
def plot_neb(self, show=True):
    '''Plot the NEB band energies with a cubic-spline fit and the
    estimated barrier.

    If the job is still queued, a snapshot is assembled from the files
    already on disk. By default (`show=True`) the images are viewed and
    the figure is shown. Returns the matplotlib line objects of the
    image-energy plot.
    '''
    import jasp
    try:
        images, energies = self.get_neb()
    except (jasp.VaspQueued):
        # Job still in the queue: take a snapshot of the progress from
        # the output files written so far.
        calc = read_neb_calculator()
        images = calc.neb_images
        energies = []
        energies += [float(open('00/energy').readline())]
        for i in range(1, len(images) - 1):
            f = open('{0}/OUTCAR'.format(str(i).zfill(2)))
            elines = []
            for line in f:
                if 'energy w' in line:
                    elines += [line]
            f.close()
            # take last line
            fields = elines[-1].split()
            energies += [float(fields[-1])]
        # BUG FIX: zfill(2) was applied to the formatted path instead of
        # the image number, so e.g. image 4 looked for '4/energy' rather
        # than '04/energy' (cf. the correct form in read_neb_calculator).
        energies += [float(open('{0}/energy'.format(str(len(images) - 1).zfill(2))).readline())]
    energies = np.array(energies) - energies[0]
    # add fitted line to band energies. we make a cubic spline
    # interpolating function of the negative energy so we can find the
    # minimum which corresponds to the barrier
    from scipy.interpolate import interp1d
    from scipy.optimize import fmin
    f = interp1d(range(len(energies)),
                 -energies,
                 kind='cubic', bounds_error=False)
    x0 = len(energies) / 2.  # guess barrier is at half way
    xmax = fmin(f, x0)
    xfit = np.linspace(0, len(energies) - 1)
    bandfit = -f(xfit)
    import matplotlib.pyplot as plt
    p = plt.plot(energies-energies[0], 'bo ', label='images')
    plt.plot(xfit, bandfit, 'r-', label='fit')
    plt.plot(xmax, -f(xmax), '* ', label='max')
    plt.xlabel('Image')
    plt.ylabel('Energy (eV)')
    s = ['$\Delta E$ = {0:1.3f} eV'.format(float(energies[-1]-energies[0])),
         '$E^\ddag$ = {0:1.3f} eV'.format(float(-f(xmax)))]
    plt.title('\n'.join(s))
    plt.legend(loc='best', numpoints=1)
    if show:
        from ase.visualize import view
        view(images)
        plt.show()
    return p

# Attach as a method of the Vasp class (monkey patch).
Vasp.plot_neb = plot_neb
def read_neb_calculator():
    '''Read calculator from the current working directory.

    Static method that returns a :mod:`jasp.Jasp` calculator.

    Rebuilds the NEB state (images, endpoint energies) from the 00..NN
    image subdirectories of an existing NEB run.
    '''
    log.debug('Entering read_neb_calculator in {0}'.format(os.getcwd()))
    calc = Vasp()
    calc.vaspdir = os.getcwd()
    calc.read_incar()
    calc.read_kpoints()
    # set default functional
    # only if xc is not already specified
    if calc.string_params['gga'] is None:
        if calc.input_params['xc'] is None:
            calc.input_params['xc'] = 'PBE'
    images = []
    # These are the interpolated images
    log.debug('calc.int_params[images] = %i', calc.int_params['images'])
    # We add 2 to include the start and end images
    for i in range(calc.int_params['images'] + 2):
        log.debug('reading neb calculator: 0%i', i)
        cwd = os.getcwd()
        os.chdir('{0}'.format(str(i).zfill(2)))
        # Prefer CONTCAR (latest geometry); fall back to POSCAR when it
        # is absent or still empty (vasp may not have written it yet).
        if os.path.exists('CONTCAR'):
            f = open('CONTCAR')
            if f.read() == '':
                log.debug('CONTCAR was empty, vasp probably still running')
                fname = 'POSCAR'
            else:
                fname = 'CONTCAR'
        else:
            fname = 'POSCAR'
        atoms = read(fname, format='vasp')
        # Undo the species sorting applied when the POSCAR was written;
        # ase-sort.dat holds the (sort, resort) permutations.
        f = open('ase-sort.dat')
        sort, resort = [], []
        for line in f:
            s,r = [int(x) for x in line.split()]
            sort.append(s)
            resort.append(r)
        images += [atoms[resort]]
        os.chdir(cwd)
    log.debug('len(images) = %i', len(images))
    # Endpoint energies were written as plain text files by get_neb.
    f = open('00/energy')
    calc.neb_initial_energy = float(f.readline().strip())
    f.close()
    f = open('{0}/energy'.format(str(len(images) - 1).zfill(2)))
    calc.neb_final_energy = float(f.readline().strip())
    f.close()
    calc.neb_images = images
    calc.neb_nimages = len(images) - 2
    calc.neb=True
    return calc
def neb_initialize(neb_images, kwargs):
    '''Creates necessary files for an NEB calculation.

    `neb_images` is the full list of images including the endpoints,
    whose attached calculators must already have converged energies.
    Returns a new Vasp calculator configured from the initial image.
    '''
    for a in neb_images:
        log.debug(a.numbers)
    calc = Vasp()
    # how to get the initial and final energies?
    initial = neb_images[0]
    log.debug(initial.numbers)
    calc0 = initial.get_calculator()
    log.debug('Calculator cwd = %s', calc0.cwd)
    log.debug('Calculator vaspdir = %s', calc0.vaspdir)
    # we have to store the initial and final energies because
    # otherwise they will not be available when reread the
    # directory in another script, e.g. jaspsum. The only other
    # option is to make the initial and final directories full
    # vasp calculations.
    CWD = os.getcwd()
    try:
        # Read the initial-endpoint energy from its own calculation dir.
        os.chdir(os.path.join(calc0.cwd, calc0.vaspdir))
        e0 = calc0.read_energy()[1]
        calc.neb_initial_energy = e0
    finally:
        os.chdir(CWD)
    final = neb_images[-1]
    log.debug(final.numbers)
    calc_final = final.get_calculator()
    log.debug(calc_final.cwd)
    log.debug(calc_final.vaspdir)
    try:
        # Same for the final endpoint.
        os.chdir(os.path.join(calc_final.cwd, calc_final.vaspdir))
        efinal = calc_final.read_energy()[1]
        calc.neb_final_energy = efinal
    finally:
        os.chdir(CWD)
    # make a Vasp object and set inputs to initial image
    calc.int_params.update(calc0.int_params)
    calc.float_params.update(calc0.float_params)
    calc.exp_params.update(calc0.exp_params)
    calc.string_params.update(calc0.string_params)
    calc.bool_params.update(calc0.bool_params)
    calc.list_params.update(calc0.list_params)
    calc.dict_params.update(calc0.dict_params)
    calc.input_params.update(calc0.input_params)
    calc.neb_kwargs = kwargs
    # this is the vasp images tag. it does not include the endpoints
    IMAGES = len(neb_images) - 2
    calc.set(images=IMAGES)
    calc.neb_images = neb_images
    calc.neb_nimages = IMAGES
    calc.neb = True
    return calc
| prtkm/jasp | jasp/jasp_neb.py | Python | gpl-2.0 | 14,770 | [
"ASE",
"VASP"
] | 020344cc26d536fdb046ad3f505f3a86f06c603b5fb2645d87d4c0a94fc4891b |
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Used to feed the README into setup()'s ``long_description`` without
    duplicating its text here.
    """
    # The original open() leaked the file handle; a context manager
    # closes it deterministically.
    with open(os.path.join(os.path.dirname(__file__), fname)) as fobj:
        return fobj.read()
# Packaging metadata for the HyPED distribution.
setup(
    name="HyPED",
    version="0.0.2",
    author="Joseph Osborn, Brian Lambrigger, Calvin Walantus",
    author_email="jcosborn@ucsc.edu",
    description=("Hybrid automata-based game character modeling"),
    license="MIT",
    # keywords="example documentation tutorial",
    url="https://github.com/JoeOsborn/hyped",
    packages=['hyped', 'player', 'test'],
    # Runtime dependencies, resolved by pip at install time.
    install_requires=[
        'numpy',
        'scipy',
        'sympy',
        'pyopengl',
        'PyOpenGL_accelerate',
        'dill',
        'bitarray',
        'vectormath',
        # 'defusedxml',
        'matplotlib',
        'rnc2rng',
        'lxml',
        'z3-solver'
    ],
    # Reuse the README as the PyPI long description.
    long_description=read('README'),
    classifiers=[
        "Development Status :: 3 - Alpha",
        # "Topic :: Utilities",
        "License :: OSI Approved :: MIT License",
    ],
)
| JoeOsborn/hyped | setup.py | Python | mit | 1,206 | [
"Brian"
] | f178439207c59f68b31ac367f861dabf53d5260309b68f95b2dd74d20d6679a4 |
#!/usr/bin/env python
'''
Online example adapted from:
http://www.vtk.org/Wiki/VTK/Examples/Python/Graphs/EdgeWeights

Builds a random tree as an undirected VTK graph and displays it in an
interactive 2D layout view. Heavily commented to document how to adapt
this into other graph visualizations.
'''
import vtk  # Mandatory on all python VTK scripts
import random

# Empty mutable graph data structure.
g = vtk.vtkMutableUndirectedGraph()

# Create a start node and seed the queue of leaves to expand.
init_v = g.AddVertex()
leaf_list = [init_v]

# Randomly pick a termination count for the total tree size.
total_nodes = random.randint(1, 1000)
created_nodes = 0

# Grow the tree breadth-first until the node budget is spent or
# there are no leaves left to expand.
while created_nodes < total_nodes and len(leaf_list) != 0:
    # Expand the oldest leaf first (FIFO).
    cur_leaf = leaf_list.pop(0)
    # How many children this leaf gets.
    num_children = random.randint(1, 10)
    for i in range(num_children):
        child = g.AddVertex()
        g.AddGraphEdge(cur_leaf, child)
        leaf_list.append(child)
        created_nodes += 1

# The layout view handles most of the rendering/interaction backend.
graphLayoutView = vtk.vtkGraphLayoutView()
# Hand our graph to the view.
graphLayoutView.AddRepresentationFromInput(g)
# General set up.
graphLayoutView.SetLayoutStrategy("Simple 2D")
# Recenter the camera around our points.
graphLayoutView.ResetCamera()
# Open and render the window.
# BUG FIX: this line read 'jkjgraphLayoutView.Render()', which raised
# NameError at runtime; the view object is named graphLayoutView.
graphLayoutView.Render()
# Fix the layout's random seed so runs are reproducible.
graphLayoutView.GetLayoutStrategy().SetRandomSeed(0)
# Start the interactor (event loop / widgets).
graphLayoutView.GetInteractor().Start()
| espinm2/interactive-vtk | pysrc/PlanarGraph.py | Python | mit | 1,648 | [
"VTK"
] | f6c870e0b6d56a4236a8f936e82c4817554e2c978505f6380cf6547c530dc2f5 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.