| text (string, lengths 12 to 1.05M) | repo_name (string, lengths 5 to 86) | path (string, lengths 4 to 191) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12 to 1.05M) | keyword (list, lengths 1 to 23) | text_hash (string, length 64) |
|---|---|---|---|---|---|---|---|
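Each row of the table pairs a source file (`text`) with its repository metadata. Below is a minimal sketch of how such an export could be read, assuming it has been saved locally as a JSON Lines file; the filename `code_rows.jsonl` and the filter shown are illustrative only.

```python
import json

# Hypothetical local export: one JSON object per row, with the columns listed above
# (text, repo_name, path, language, license, size, keyword, text_hash).
with open("code_rows.jsonl") as rows:
    for line in rows:
        row = json.loads(line)
        # Example filter: keep only files tagged with the "Gaussian" keyword.
        if "Gaussian" in row["keyword"]:
            print(row["repo_name"], row["path"], row["size"])
```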
################################
## Set diagnostics to True    ##
## if you want to display the ##
## tracking process.          ##
################################
diagnostics=False
#################################
## Import packages ##
#################################
import sys
from trackeddy.tracking import *
from trackeddy.datastruct import *
from trackeddy.geometryfunc import *
from trackeddy.physics import *
from numpy import *
from pylab import *
import cmocean as cm
import random
import pytest
import warnings
warnings.filterwarnings("ignore")
#################################
## Import tools to create ##
## synthetic fields ##
#################################
from trackeddy.utils.gaussian_field_functions import *
import trackeddy.utils.field_generator as fg
#################################
## Test 1: Check the detection ##
## of a steady gaussian ##
#################################
def gauss_n_fit(n):
'''
Test that the expected number of eddies is identified in a single snapshot of a field containing n Gaussians.
'''
a = 0.1
b = 0.1
t0 = 0
t = 1
xx=linspace(10,12,200)
yy=linspace(10,12,200)
gf=fg.Generate_field(a,b,n,xx,yy,'Nint')
data = gf.assemble_field(t)
x=linspace(10,13,300)
y=linspace(10,13,300)
preferences={'ellipse':0.85,'eccentricity':0.85,'gaussian':0.8}
eddytd={}
eddytdn={}
try:
levels = {'max':data.max(),'min':0.1,'step':0.1}
eddytd = analyseddyzt(data,x,y,t0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True,debug=False)
except:
print("No positive")
try:
levels = {'max':data.min(),'min':-0.1,'step':-0.1}
eddytdn = analyseddyzt(data,x,y,t0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True,debug=False)
except:
print("No negative")
return len(eddytd.keys())+len(eddytdn.keys())
@pytest.mark.ttrackeddy
def test_gauss_n_fit():
assert gauss_n_fit(7) == 7
def gauss_mult_n_fit(n,t):
'''
Test the number of eddies identified over t timesteps in a random-walker Gaussian field containing n eddies.
'''
a = 0.07
b = 0.07
t0 = 0
xx=linspace(10,12.5,300)
yy=linspace(10,12.5,300)
gf=fg.Generate_field(a,b,n,xx,yy,'Nint')
data = gf.assemble_field(t)
x=linspace(10,13.5,400)
y=linspace(10,13.5,400)
preferences={'ellipse':0.85,'eccentricity':0.85,'gaussian':0.8}
eddytd={}
eddytdn={}
try:
levels = {'max':data.max(),'min':0.1,'step':0.1}
eddytd = analyseddyzt(data,x,y,t0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True,debug=False)
except:
print("No positive")
try:
levels = {'max':data.min(),'min':-0.1,'step':-0.1}
eddytdn = analyseddyzt(data,x,y,t0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True,debug=False)
except:
print("No negative")
posn=sum([len(item['time']) for keys,item in eddytd.items()])
negn=sum([len(item['time']) for keys,item in eddytdn.items()])
# print(posn,negn)
# print([item['time'] for keys,item in eddytd.items()])
# for ii in range(shape(data)[0]):
# contourf(x,y,data[ii,:,:],vmin=-1,vmac=1,cmap=cm.cm.balance)
# colorbar()
# savefig("deleteme_%03d_n%d.png" % (ii,n))
# close()
return posn+negn
@pytest.mark.parametrize(('n', 't'), [
(3, 10),
(4, 9),
(5, 8),
(6, 7),
(7, 6),
(8, 5),
(9, 4),
(10, 3),
(11, 2),
(12, 1),
])
@pytest.mark.ttrackeddy
def test_gauss_mult_n_fit(n,t):
assert gauss_mult_n_fit(n,t) == n * t
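# Hedged usage note: the helpers above can also be exercised outside pytest, e.g.
# counting the eddies recovered from a single seven-Gaussian snapshot (assuming
# trackeddy and its field generator import cleanly):
#
#     n_found = gauss_n_fit(7)
#     print("eddies identified:", n_found)
#
# The parametrised cases are tagged with the custom marker `ttrackeddy`, so they
# would normally be selected with something like `pytest -m ttrackeddy`, assuming
# the marker is registered in the project's pytest configuration.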
| (code above) | Josue-Martinez-Moreno/trackeddy | tests/test_2d_assemble.py | Python | mit | 4,093 | ["Gaussian"] | 627c48c76dc6ce9116141e0e424f44b318841cfbc80c75b674d6d55541f29a73 |
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
import argparse
import re
from os.path import join, exists
from tempfile import mkdtemp
from shutil import rmtree
import mx
import mx_benchmark
import mx_graal_core
from mx_benchmark import ParserEntry
from argparse import ArgumentParser
# Short-hand commands used to quickly run common benchmarks.
mx.update_commands(mx.suite('graal-core'), {
'dacapo': [
lambda args: createBenchmarkShortcut("dacapo", args),
'[<benchmarks>|*] [-- [VM options] [-- [DaCapo options]]]'
],
'scaladacapo': [
lambda args: createBenchmarkShortcut("scala-dacapo", args),
'[<benchmarks>|*] [-- [VM options] [-- [Scala DaCapo options]]]'
],
'specjvm2008': [
lambda args: createBenchmarkShortcut("specjvm2008", args),
'[<benchmarks>|*] [-- [VM options] [-- [SPECjvm2008 options]]]'
],
'specjbb2005': [
lambda args: mx_benchmark.benchmark(["specjbb2005"] + args),
'[-- [VM options] [-- [SPECjbb2005 options]]]'
],
'specjbb2013': [
lambda args: mx_benchmark.benchmark(["specjbb2013"] + args),
'[-- [VM options] [-- [SPECjbb2013 options]]]'
],
'specjbb2015': [
lambda args: mx_benchmark.benchmark(["specjbb2015"] + args),
'[-- [VM options] [-- [SPECjbb2015 options]]]'
],
})
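# Hedged examples of the shortcuts registered above, following the usage strings
# they declare ('[<benchmarks>|*] [-- [VM options] [-- [suite options]]]'); the
# benchmark names and options below are illustrative only:
#
#     mx dacapo fop -- -Xmx8g -- -n 10
#     mx scaladacapo '*' -- -Xms2g
#     mx specjbb2005 -- -Xmx16g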
def createBenchmarkShortcut(benchSuite, args):
if not args:
benchname = "*"
remaining_args = []
elif args[0] == "--":
# not a benchmark name
benchname = "*"
remaining_args = args
else:
benchname = args[0]
remaining_args = args[1:]
return mx_benchmark.benchmark([benchSuite + ":" + benchname] + remaining_args)
# dacapo suite parsers.
def _create_dacapo_parser():
parser = ArgumentParser(add_help=False, usage=mx_benchmark._mx_benchmark_usage_example + " -- <options> -- ...")
group = parser.add_mutually_exclusive_group()
group.add_argument("--keep-scratch", action="store_true", help="Do not delete scratch directory after benchmark execution.")
group.add_argument("--no-scratch", action="store_true", help="Do not execute benchmark in scratch directory.")
return parser
mx_benchmark.parsers["dacapo_benchmark_suite"] = ParserEntry(
_create_dacapo_parser(),
"\n\nFlags for DaCapo-style benchmark suites:\n"
)
class JvmciJdkVm(mx_benchmark.OutputCapturingJavaVm):
def __init__(self, raw_name, raw_config_name, extra_args):
self.raw_name = raw_name
self.raw_config_name = raw_config_name
self.extra_args = extra_args
def name(self):
return self.raw_name
def config_name(self):
return self.raw_config_name
def dimensions(self, cwd, args, code, out):
return {
"host-vm": self.name(),
"host-vm-config": self.config_name(),
"guest-vm": "none",
"guest-vm-config": "none"
}
def post_process_command_line_args(self, args):
return self.extra_args + args
def run_java(self, args, out=None, err=None, cwd=None, nonZeroIsFatal=False):
tag = mx.get_jdk_option().tag
if tag and tag != mx_graal_core._JVMCI_JDK_TAG:
mx.abort("The '{0}/{1}' VM requires '--jdk={2}'".format(
self.name(), self.config_name(), mx_graal_core._JVMCI_JDK_TAG))
mx.get_jdk(tag=mx_graal_core._JVMCI_JDK_TAG).run_java(
args, out=out, err=out, cwd=cwd, nonZeroIsFatal=False)
mx_benchmark.add_java_vm(JvmciJdkVm('server', 'default', ['-server', '-XX:-EnableJVMCI']))
mx_benchmark.add_java_vm(JvmciJdkVm('server', 'hosted', ['-server', '-XX:+EnableJVMCI']))
mx_benchmark.add_java_vm(JvmciJdkVm('server', 'graal-core', ['-server', '-XX:+EnableJVMCI', '-XX:+UseJVMCICompiler', '-Djvmci.Compiler=graal']))
mx_benchmark.add_java_vm(JvmciJdkVm('server', 'graal-core-tracera', ['-server', '-XX:+EnableJVMCI', '-XX:+UseJVMCICompiler', '-Djvmci.Compiler=graal',
'-Dgraal.TraceRA=true']))
# On 64 bit systems -client is not supported. Nevertheless, when running with -server, we can
# force the VM to just compile code with C1 but not with C2 by adding option -XX:TieredStopAtLevel=1.
# This behavior is the closest we can get to the -client vm configuration.
mx_benchmark.add_java_vm(JvmciJdkVm('client', 'default', ['-server', '-XX:-EnableJVMCI', '-XX:TieredStopAtLevel=1']))
mx_benchmark.add_java_vm(JvmciJdkVm('client', 'hosted', ['-server', '-XX:+EnableJVMCI', '-XX:TieredStopAtLevel=1']))
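# Hedged sketch: additional host-VM configurations can be registered in the same
# way by reusing the JvmciJdkVm wrapper defined above; the flag combination below
# is illustrative only and is deliberately left commented out.
#
# mx_benchmark.add_java_vm(JvmciJdkVm('server', 'graal-core-no-tiered', [
#     '-server', '-XX:+EnableJVMCI', '-XX:+UseJVMCICompiler',
#     '-Djvmci.Compiler=graal', '-XX:-TieredCompilation']))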
class TimingBenchmarkMixin(object):
debug_values_file = 'debug-values.csv'
name_re = re.compile(r"(?P<name>GraalCompiler|BackEnd|FrontEnd|LIRPhaseTime_\w+)_Accm")
def vmArgs(self, bmSuiteArgs):
vmArgs = ['-Dgraal.Time=', '-Dgraal.DebugValueHumanReadable=false', '-Dgraal.DebugValueSummary=Name',
'-Dgraal.DebugValueFile=' + TimingBenchmarkMixin.debug_values_file] + super(TimingBenchmarkMixin, self).vmArgs(bmSuiteArgs)
return vmArgs
def getBechmarkName(self):
raise NotImplementedError()
def benchSuiteName(self):
raise NotImplementedError()
def name(self):
return self.benchSuiteName() + "-timing"
def filterResult(self, r):
m = TimingBenchmarkMixin.name_re.match(r['name'])
if m:
r['name'] = m.groupdict()['name']
return r
return None
def get_csv_filename(self, benchmarks, bmSuiteArgs):
return TimingBenchmarkMixin.debug_values_file
def rules(self, out, benchmarks, bmSuiteArgs):
return [
mx_benchmark.CSVFixedFileRule(
filename=self.get_csv_filename(benchmarks, bmSuiteArgs),
colnames=['scope', 'name', 'value', 'unit'],
replacement={
"benchmark": self.getBechmarkName(),
"bench-suite": self.benchSuiteName(),
"vm": "jvmci",
"config.name": "default",
"extra.value.name": ("<name>", str),
"metric.name": ("compile-time", str),
"metric.value": ("<value>", int),
"metric.unit": ("<unit>", str),
"metric.type": "numeric",
"metric.score-function": "id",
"metric.better": "lower",
"metric.iteration": 0
},
filter_fn=self.filterResult,
delimiter=';', quotechar='"', escapechar='\\'
),
]
class DaCapoTimingBenchmarkMixin(TimingBenchmarkMixin):
def postprocessRunArgs(self, benchname, runArgs):
self.currentBenchname = benchname
return super(DaCapoTimingBenchmarkMixin, self).postprocessRunArgs(benchname, runArgs)
def getBechmarkName(self):
return self.currentBenchname
class MoveProfilingBenchmarkMixin(object):
"""Benchmark-mixin for measuring the number of dynamically executed move instructions.
See com.oracle.graal.lir.profiling.MoveProfilingPhase for more details.
"""
benchmark_counters_file = 'benchmark-counters.csv'
def vmArgs(self, bmSuiteArgs):
vmArgs = [
self.get_dynamic_counters_argument(),
'-XX:JVMCICounterSize=10',
'-Dgraal.LIRProfileMoves=true',
'-Dgraal.DynamicCountersHumanReadable=false',
'-Dgraal.DynamicCountersPrintGroupSeparator=false',
'-Dgraal.BenchmarkCountersFile=' + MoveProfilingBenchmarkMixin.benchmark_counters_file] + super(MoveProfilingBenchmarkMixin, self).vmArgs(bmSuiteArgs)
return vmArgs
def get_dynamic_counters_argument(self):
""" The argument to select the desired dynamic counters mode. Possible values are
`-Dgraal.GenericDynamicCounters=...`, `-Dgraal.TimedDynamicCounters=...` or
`-Dgraal.BenchmarkDynamicCounters=...`. See com.oracle.graal.hotspot.debug.BenchmarkCounters
for more information.
"""
raise NotImplementedError()
def getBechmarkName(self):
raise NotImplementedError()
def benchSuiteName(self):
raise NotImplementedError()
def name(self):
return self.benchSuiteName() + "-move-profiling"
def get_csv_filename(self, benchmarks, bmSuiteArgs):
return MoveProfilingBenchmarkMixin.benchmark_counters_file
def shorten_flags(self, args):
def _shorten(x):
if any(p in x for p in ["DynamicCounter", "BenchmarkCounter"]):
return "..."
return x
arg_str = " ".join((_shorten(x) for x in args))
return mx_benchmark.Rule.crop_back("...")(arg_str)
def rules(self, out, benchmarks, bmSuiteArgs):
return [
mx_benchmark.CSVFixedFileRule(
filename=self.get_csv_filename(benchmarks, bmSuiteArgs),
colnames=['type', 'group', 'name', 'value'],
replacement={
"benchmark": self.getBechmarkName(),
"bench-suite": self.benchSuiteName(),
"vm": "jvmci",
"config.name": "default",
"config.vm-flags": self.shorten_flags(self.vmArgs(bmSuiteArgs)),
"extra.value.name": ("<name>", str),
"metric.name": ("dynamic-moves", str),
"metric.value": ("<value>", int),
"metric.unit": "#",
"metric.type": "numeric",
"metric.score-function": "id",
"metric.better": "lower",
"metric.iteration": 0
},
delimiter=';', quotechar='"', escapechar='\\'
),
]
class DaCapoMoveProfilingBenchmarkMixin(MoveProfilingBenchmarkMixin):
def vmArgs(self, bmSuiteArgs):
# We need to bootstrap to eagerly initialize Graal, otherwise we cannot intercept
# stdio since it is rerouted by the DaCapo harness.
return ['-XX:+BootstrapJVMCI'] + super(DaCapoMoveProfilingBenchmarkMixin, self).vmArgs(bmSuiteArgs)
def get_dynamic_counters_argument(self):
# we only count the moves executed during the last (the measurement) iteration
return '-Dgraal.BenchmarkDynamicCounters=err, starting ====, PASSED in '
def postprocessRunArgs(self, benchname, runArgs):
self.currentBenchname = benchname
return super(DaCapoMoveProfilingBenchmarkMixin, self).postprocessRunArgs(benchname, runArgs)
def getBechmarkName(self):
return self.currentBenchname
class BaseDaCapoBenchmarkSuite(mx_benchmark.JavaBenchmarkSuite):
"""Base benchmark suite for DaCapo-based benchmarks.
This suite can only run a single benchmark in one VM invocation.
"""
def group(self):
return "Graal"
def subgroup(self):
return "graal-compiler"
def before(self, bmSuiteArgs):
parser = mx_benchmark.parsers["dacapo_benchmark_suite"].parser
bmArgs, _ = parser.parse_known_args(bmSuiteArgs)
self.keepScratchDir = bmArgs.keep_scratch
if not bmArgs.no_scratch:
self.workdir = mkdtemp(prefix='dacapo-work.', dir='.')
else:
mx.warn("NO scratch directory created! (--no-scratch)")
self.workdir = None
def workingDirectory(self, benchmarks, bmSuiteArgs):
return self.workdir
def after(self, bmSuiteArgs):
if hasattr(self, "keepScratchDir") and self.keepScratchDir:
mx.warn("Scratch directory NOT deleted (--keep-scratch): {0}".format(self.workdir))
elif self.workdir:
rmtree(self.workdir)
def daCapoClasspathEnvVarName(self):
raise NotImplementedError()
def daCapoLibraryName(self):
raise NotImplementedError()
def daCapoPath(self):
dacapo = mx.get_env(self.daCapoClasspathEnvVarName())
if dacapo:
return dacapo
lib = mx.library(self.daCapoLibraryName(), False)
if lib:
return lib.get_path(True)
return None
def daCapoIterations(self):
raise NotImplementedError()
def parserNames(self):
return super(BaseDaCapoBenchmarkSuite, self).parserNames() + ["dacapo_benchmark_suite"]
def validateEnvironment(self):
if not self.daCapoPath():
raise RuntimeError(
"Neither " + self.daCapoClasspathEnvVarName() + " variable nor " +
self.daCapoLibraryName() + " library specified.")
def validateReturnCode(self, retcode):
return retcode == 0
def postprocessRunArgs(self, benchname, runArgs):
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("-n", default=None)
args, remaining = parser.parse_known_args(runArgs)
if args.n:
if args.n.isdigit():
return ["-n", args.n] + remaining
if args.n == "-1":
return None
else:
iterations = self.daCapoIterations()[benchname]
if iterations == -1:
return None
else:
return ["-n", str(iterations)] + remaining
def vmArgs(self, bmSuiteArgs):
parser = mx_benchmark.parsers["dacapo_benchmark_suite"].parser
_, remainingBmSuiteArgs = parser.parse_known_args(bmSuiteArgs)
vmArgs = super(BaseDaCapoBenchmarkSuite, self).vmArgs(remainingBmSuiteArgs)
return vmArgs
def createCommandLineArgs(self, benchmarks, bmSuiteArgs):
if benchmarks is None:
raise RuntimeError(
"Suite runs only a single benchmark.")
if len(benchmarks) != 1:
raise RuntimeError(
"Suite runs only a single benchmark, got: {0}".format(benchmarks))
runArgs = self.postprocessRunArgs(benchmarks[0], self.runArgs(bmSuiteArgs))
if runArgs is None:
return None
return (
self.vmArgs(bmSuiteArgs) + ["-jar"] + [self.daCapoPath()] +
[benchmarks[0]] + runArgs)
def benchmarkList(self, bmSuiteArgs):
return [key for key, value in self.daCapoIterations().iteritems() if value != -1]
def daCapoSuiteTitle(self):
"""Title string used in the output next to the performance result."""
raise NotImplementedError()
def successPatterns(self):
return [
re.compile(
r"^===== " + re.escape(self.daCapoSuiteTitle()) + " ([a-zA-Z0-9_]+) PASSED in ([0-9]+) msec =====", # pylint: disable=line-too-long
re.MULTILINE)
]
def failurePatterns(self):
return [
re.compile(
r"^===== " + re.escape(self.daCapoSuiteTitle()) + " ([a-zA-Z0-9_]+) FAILED (warmup|) =====", # pylint: disable=line-too-long
re.MULTILINE)
]
def rules(self, out, benchmarks, bmSuiteArgs):
runArgs = self.postprocessRunArgs(benchmarks[0], self.runArgs(bmSuiteArgs))
if runArgs is None:
return []
totalIterations = int(runArgs[runArgs.index("-n") + 1])
return [
mx_benchmark.StdOutRule(
r"===== " + re.escape(self.daCapoSuiteTitle()) + " (?P<benchmark>[a-zA-Z0-9_]+) PASSED in (?P<time>[0-9]+) msec =====", # pylint: disable=line-too-long
{
"benchmark": ("<benchmark>", str),
"vm": "jvmci",
"config.name": "default",
"metric.name": "time",
"metric.value": ("<time>", int),
"metric.unit": "ms",
"metric.type": "numeric",
"metric.score-function": "id",
"metric.better": "lower",
"metric.iteration": 0
}
),
mx_benchmark.StdOutRule(
r"===== " + re.escape(self.daCapoSuiteTitle()) + " (?P<benchmark>[a-zA-Z0-9_]+) PASSED in (?P<time>[0-9]+) msec =====", # pylint: disable=line-too-long
{
"benchmark": ("<benchmark>", str),
"vm": "jvmci",
"config.name": "default",
"metric.name": "warmup",
"metric.value": ("<time>", int),
"metric.unit": "ms",
"metric.type": "numeric",
"metric.score-function": "id",
"metric.better": "lower",
"metric.iteration": totalIterations - 1
}
),
mx_benchmark.StdOutRule(
r"===== " + re.escape(self.daCapoSuiteTitle()) + " (?P<benchmark>[a-zA-Z0-9_]+) completed warmup [0-9]+ in (?P<time>[0-9]+) msec =====", # pylint: disable=line-too-long
{
"benchmark": ("<benchmark>", str),
"vm": "jvmci",
"config.name": "default",
"metric.name": "warmup",
"metric.value": ("<time>", int),
"metric.unit": "ms",
"metric.type": "numeric",
"metric.score-function": "id",
"metric.better": "lower",
"metric.iteration": ("$iteration", int)
}
)
]
_daCapoIterations = {
"avrora" : 20,
"batik" : 40,
"eclipse" : -1,
"fop" : 40,
"h2" : 20,
"jython" : 40,
"luindex" : 15,
"lusearch" : 40,
"pmd" : 30,
"sunflow" : 30,
"tomcat" : -1, # Stopped working as of 8u92
"tradebeans" : -1,
"tradesoap" : -1,
"xalan" : 20,
}
class DaCapoBenchmarkSuite(BaseDaCapoBenchmarkSuite):
"""DaCapo 9.12 (Bach) benchmark suite implementation."""
def name(self):
return "dacapo"
def daCapoSuiteTitle(self):
return "DaCapo 9.12"
def daCapoClasspathEnvVarName(self):
return "DACAPO_CP"
def daCapoLibraryName(self):
return "DACAPO"
def daCapoIterations(self):
return _daCapoIterations
def flakySuccessPatterns(self):
return [
re.compile(
r"^javax.ejb.FinderException: Cannot find account for",
re.MULTILINE),
re.compile(
r"^java.lang.Exception: TradeDirect:Login failure for user:",
re.MULTILINE),
]
mx_benchmark.add_bm_suite(DaCapoBenchmarkSuite())
class DaCapoTimingBenchmarkSuite(DaCapoTimingBenchmarkMixin, DaCapoBenchmarkSuite): # pylint: disable=too-many-ancestors
"""DaCapo 9.12 (Bach) benchmark suite implementation."""
def benchSuiteName(self):
return "dacapo"
mx_benchmark.add_bm_suite(DaCapoTimingBenchmarkSuite())
class DaCapoMoveProfilingBenchmarkSuite(DaCapoMoveProfilingBenchmarkMixin, DaCapoBenchmarkSuite): # pylint: disable=too-many-ancestors
"""DaCapo 9.12 (Bach) benchmark suite implementation."""
def benchSuiteName(self):
return "dacapo"
mx_benchmark.add_bm_suite(DaCapoMoveProfilingBenchmarkSuite())
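# Hedged usage note: with the registrations above, the timing and move-profiling
# variants are exposed as separate suites (see name() in the mixins), so a single
# DaCapo benchmark would typically be selected with something like:
#
#     mx benchmark dacapo:fop
#     mx benchmark dacapo-timing:fop
#     mx benchmark dacapo-move-profiling:fop
#
# The exact `mx benchmark` invocation depends on the local mx setup.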
_daCapoScalaConfig = {
"actors" : 10,
"apparat" : 5,
"factorie" : 5,
"kiama" : 40,
"scalac" : 20,
"scaladoc" : 15,
"scalap" : 120,
"scalariform" : 30,
"scalatest" : 50,
"scalaxb" : 35,
"specs" : 20,
"tmt" : 12
}
class ScalaDaCapoBenchmarkSuite(BaseDaCapoBenchmarkSuite):
"""Scala DaCapo benchmark suite implementation."""
def name(self):
return "scala-dacapo"
def daCapoSuiteTitle(self):
return "DaCapo 0.1.0-SNAPSHOT"
def daCapoClasspathEnvVarName(self):
return "DACAPO_SCALA_CP"
def daCapoLibraryName(self):
return "DACAPO_SCALA"
def daCapoIterations(self):
return _daCapoScalaConfig
mx_benchmark.add_bm_suite(ScalaDaCapoBenchmarkSuite())
class ScalaDaCapoTimingBenchmarkSuite(DaCapoTimingBenchmarkMixin, ScalaDaCapoBenchmarkSuite): # pylint: disable=too-many-ancestors
"""Scala DaCapo benchmark suite implementation."""
def benchSuiteName(self):
return "scala-dacapo"
mx_benchmark.add_bm_suite(ScalaDaCapoTimingBenchmarkSuite())
class ScalaDaCapoMoveProfilingBenchmarkSuite(DaCapoMoveProfilingBenchmarkMixin, ScalaDaCapoBenchmarkSuite): # pylint: disable=too-many-ancestors
"""Scala DaCapo benchmark suite implementation."""
def benchSuiteName(self):
return "scala-dacapo"
mx_benchmark.add_bm_suite(ScalaDaCapoMoveProfilingBenchmarkSuite())
_allSpecJVM2008Benches = [
'startup.helloworld',
'startup.compiler.compiler',
# 'startup.compiler.sunflow', # disabled until timeout problem in jdk8 is resolved
'startup.compress',
'startup.crypto.aes',
'startup.crypto.rsa',
'startup.crypto.signverify',
'startup.mpegaudio',
'startup.scimark.fft',
'startup.scimark.lu',
'startup.scimark.monte_carlo',
'startup.scimark.sor',
'startup.scimark.sparse',
'startup.serial',
'startup.sunflow',
'startup.xml.transform',
'startup.xml.validation',
'compiler.compiler',
# 'compiler.sunflow',
'compress',
'crypto.aes',
'crypto.rsa',
'crypto.signverify',
'derby',
'mpegaudio',
'scimark.fft.large',
'scimark.lu.large',
'scimark.sor.large',
'scimark.sparse.large',
'scimark.fft.small',
'scimark.lu.small',
'scimark.sor.small',
'scimark.sparse.small',
'scimark.monte_carlo',
'serial',
'sunflow',
'xml.transform',
'xml.validation'
]
class SpecJvm2008BenchmarkSuite(mx_benchmark.JavaBenchmarkSuite):
"""SpecJVM2008 benchmark suite implementation.
This benchmark suite can run multiple benchmarks as part of one VM run.
"""
def name(self):
return "specjvm2008"
def group(self):
return "Graal"
def subgroup(self):
return "graal-compiler"
def specJvmPath(self):
specjvm2008 = mx.get_env("SPECJVM2008")
if specjvm2008 is None:
mx.abort("Please set the SPECJVM2008 environment variable to a " +
"SPECjvm2008 directory.")
jarpath = join(specjvm2008, "SPECjvm2008.jar")
if not exists(jarpath):
mx.abort("The SPECJVM2008 environment variable points to a directory " +
"without the SPECjvm2008.jar file.")
return jarpath
def validateEnvironment(self):
if not self.specJvmPath():
raise RuntimeError(
"The SPECJVM2008 environment variable was not specified.")
def validateReturnCode(self, retcode):
return retcode == 0
def workingDirectory(self, benchmarks, bmSuiteArgs):
return mx.get_env("SPECJVM2008")
def createCommandLineArgs(self, benchmarks, bmSuiteArgs):
if benchmarks is None:
# No benchmark specified in the command line, so run everything.
benchmarks = self.benchmarkList(bmSuiteArgs)
vmArgs = self.vmArgs(bmSuiteArgs)
runArgs = self.runArgs(bmSuiteArgs)
return vmArgs + ["-jar"] + [self.specJvmPath()] + runArgs + benchmarks
def benchmarkList(self, bmSuiteArgs):
return _allSpecJVM2008Benches
def successPatterns(self):
return [
re.compile(
r"^(Noncompliant c|C)omposite result: (?P<score>[0-9]+((,|\.)[0-9]+)?)( SPECjvm2008 (Base|Peak))? ops/m$", # pylint: disable=line-too-long
re.MULTILINE)
]
def failurePatterns(self):
return [
re.compile(r"^Errors in benchmark: ", re.MULTILINE)
]
def flakySuccessPatterns(self):
return []
def rules(self, out, benchmarks, bmSuiteArgs):
suite_name = self.name()
if benchmarks and len(benchmarks) == 1:
suite_name = suite_name + "-single"
return [
mx_benchmark.StdOutRule(
r"^Score on (?P<benchmark>[a-zA-Z0-9\._]+): (?P<score>[0-9]+((,|\.)[0-9]+)?) ops/m$", # pylint: disable=line-too-long
{
"benchmark": ("<benchmark>", str),
"bench-suite": suite_name,
"vm": "jvmci",
"config.name": "default",
"metric.name": "throughput",
"metric.value": ("<score>", float),
"metric.unit": "op/min",
"metric.type": "numeric",
"metric.score-function": "id",
"metric.better": "higher",
"metric.iteration": 0
}
)
]
mx_benchmark.add_bm_suite(SpecJvm2008BenchmarkSuite())
class SpecJbb2005BenchmarkSuite(mx_benchmark.JavaBenchmarkSuite):
"""SPECjbb2005 benchmark suite implementation.
This suite has only a single benchmark, and does not allow setting a specific
benchmark in the command line.
"""
def name(self):
return "specjbb2005"
def group(self):
return "Graal"
def subgroup(self):
return "graal-compiler"
def specJbbClassPath(self):
specjbb2005 = mx.get_env("SPECJBB2005")
if specjbb2005 is None:
mx.abort("Please set the SPECJBB2005 environment variable to a " +
"SPECjbb2005 directory.")
jbbpath = join(specjbb2005, "jbb.jar")
if not exists(jbbpath):
mx.abort("The SPECJBB2005 environment variable points to a directory " +
"without the jbb.jar file.")
checkpath = join(specjbb2005, "check.jar")
if not exists(checkpath):
mx.abort("The SPECJBB2005 environment variable points to a directory " +
"without the check.jar file.")
return jbbpath + ":" + checkpath
def validateEnvironment(self):
if not self.specJbbClassPath():
raise RuntimeError(
"The SPECJBB2005 environment variable was not specified.")
def validateReturnCode(self, retcode):
return retcode == 0
def workingDirectory(self, benchmarks, bmSuiteArgs):
return mx.get_env("SPECJBB2005")
def createCommandLineArgs(self, benchmarks, bmSuiteArgs):
if benchmarks is not None:
mx.abort("No benchmark should be specified for the selected suite.")
vmArgs = self.vmArgs(bmSuiteArgs)
runArgs = self.runArgs(bmSuiteArgs)
mainClass = "spec.jbb.JBBmain"
propArgs = ["-propfile", "SPECjbb.props"]
return (
vmArgs + ["-cp"] + [self.specJbbClassPath()] + [mainClass] + propArgs +
runArgs)
def benchmarkList(self, bmSuiteArgs):
return ["default"]
def successPatterns(self):
return [
re.compile(
r"^Valid run, Score is [0-9]+$", # pylint: disable=line-too-long
re.MULTILINE)
]
def failurePatterns(self):
return [
re.compile(r"VALIDATION ERROR", re.MULTILINE)
]
def flakySuccessPatterns(self):
return []
def rules(self, out, benchmarks, bmSuiteArgs):
return [
mx_benchmark.StdOutRule(
r"^Valid run, Score is (?P<score>[0-9]+)$", # pylint: disable=line-too-long
{
"benchmark": "default",
"vm": "jvmci",
"config.name": "default",
"metric.name": "throughput",
"metric.value": ("<score>", float),
"metric.unit": "bops",
"metric.type": "numeric",
"metric.score-function": "id",
"metric.better": "higher",
"metric.iteration": 0
}
)
]
mx_benchmark.add_bm_suite(SpecJbb2005BenchmarkSuite())
class SpecJbb2013BenchmarkSuite(mx_benchmark.JavaBenchmarkSuite):
"""SPECjbb2013 benchmark suite implementation.
This suite has only a single benchmark, and does not allow setting a specific
benchmark in the command line.
"""
def name(self):
return "specjbb2013"
def group(self):
return "Graal"
def subgroup(self):
return "graal-compiler"
def specJbbClassPath(self):
specjbb2013 = mx.get_env("SPECJBB2013")
if specjbb2013 is None:
mx.abort("Please set the SPECJBB2013 environment variable to a " +
"SPECjbb2013 directory.")
jbbpath = join(specjbb2013, "specjbb2013.jar")
if not exists(jbbpath):
mx.abort("The SPECJBB2013 environment variable points to a directory " +
"without the specjbb2013.jar file.")
return jbbpath
def validateEnvironment(self):
if not self.specJbbClassPath():
raise RuntimeError(
"The SPECJBB2013 environment variable was not specified.")
def validateReturnCode(self, retcode):
return retcode == 0
def workingDirectory(self, benchmarks, bmSuiteArgs):
return mx.get_env("SPECJBB2013")
def createCommandLineArgs(self, benchmarks, bmSuiteArgs):
if benchmarks is not None:
mx.abort("No benchmark should be specified for the selected suite.")
vmArgs = self.vmArgs(bmSuiteArgs)
runArgs = self.runArgs(bmSuiteArgs)
return vmArgs + ["-jar", self.specJbbClassPath(), "-m", "composite"] + runArgs
def benchmarkList(self, bmSuiteArgs):
return ["default"]
def successPatterns(self):
return [
re.compile(
r"org.spec.jbb.controller: Run finished", # pylint: disable=line-too-long
re.MULTILINE)
]
def failurePatterns(self):
return []
def flakySuccessPatterns(self):
return []
def rules(self, out, benchmarks, bmSuiteArgs):
result_pattern = r"^RUN RESULT: hbIR \(max attempted\) = [0-9]+, hbIR \(settled\) = [0-9]+, max-jOPS = (?P<max>[0-9]+), critical-jOPS = (?P<critical>[0-9]+)$" # pylint: disable=line-too-long
return [
mx_benchmark.StdOutRule(
result_pattern,
{
"benchmark": "default",
"vm": "jvmci",
"config.name": "default",
"metric.name": "max",
"metric.value": ("<max>", float),
"metric.unit": "jops",
"metric.type": "numeric",
"metric.score-function": "id",
"metric.better": "higher",
"metric.iteration": 0
}
),
mx_benchmark.StdOutRule(
result_pattern,
{
"benchmark": "default",
"vm": "jvmci",
"config.name": "default",
"metric.name": "critical",
"metric.value": ("<critical>", float),
"metric.unit": "jops",
"metric.type": "numeric",
"metric.score-function": "id",
"metric.better": "higher",
"metric.iteration": 0
}
)
]
mx_benchmark.add_bm_suite(SpecJbb2013BenchmarkSuite())
class SpecJbb2015BenchmarkSuite(mx_benchmark.JavaBenchmarkSuite):
"""SPECjbb2015 benchmark suite implementation.
This suite has only a single benchmark, and does not allow setting a specific
benchmark in the command line.
"""
def name(self):
return "specjbb2015"
def group(self):
return "Graal"
def subgroup(self):
return "graal-compiler"
def specJbbClassPath(self):
specjbb2015 = mx.get_env("SPECJBB2015")
if specjbb2015 is None:
mx.abort("Please set the SPECJBB2015 environment variable to a " +
"SPECjbb2015 directory.")
jbbpath = join(specjbb2015, "specjbb2015.jar")
if not exists(jbbpath):
mx.abort("The SPECJBB2015 environment variable points to a directory " +
"without the specjbb2015.jar file.")
return jbbpath
def validateEnvironment(self):
if not self.specJbbClassPath():
raise RuntimeError(
"The SPECJBB2015 environment variable was not specified.")
def validateReturnCode(self, retcode):
return retcode == 0
def workingDirectory(self, benchmarks, bmSuiteArgs):
return mx.get_env("SPECJBB2015")
def createCommandLineArgs(self, benchmarks, bmSuiteArgs):
if benchmarks is not None:
mx.abort("No benchmark should be specified for the selected suite.")
vmArgs = self.vmArgs(bmSuiteArgs)
runArgs = self.runArgs(bmSuiteArgs)
return vmArgs + ["-jar", self.specJbbClassPath(), "-m", "composite"] + runArgs
def benchmarkList(self, bmSuiteArgs):
return ["default"]
def successPatterns(self):
return [
re.compile(
r"org.spec.jbb.controller: Run finished", # pylint: disable=line-too-long
re.MULTILINE)
]
def failurePatterns(self):
return []
def flakySuccessPatterns(self):
return []
def rules(self, out, benchmarks, bmSuiteArgs):
result_pattern = r"^RUN RESULT: hbIR \(max attempted\) = [0-9]+, hbIR \(settled\) = [0-9]+, max-jOPS = (?P<max>[0-9]+), critical-jOPS = (?P<critical>[0-9]+)$" # pylint: disable=line-too-long
return [
mx_benchmark.StdOutRule(
result_pattern,
{
"benchmark": "default",
"vm": "jvmci",
"config.name": "default",
"metric.name": "max",
"metric.value": ("<max>", float),
"metric.unit": "jops",
"metric.type": "numeric",
"metric.score-function": "id",
"metric.better": "higher",
"metric.iteration": 0
}
),
mx_benchmark.StdOutRule(
result_pattern,
{
"benchmark": "default",
"vm": "jvmci",
"config.name": "default",
"metric.name": "critical",
"metric.value": ("<critical>", float),
"metric.unit": "jops",
"metric.type": "numeric",
"metric.score-function": "id",
"metric.better": "higher",
"metric.iteration": 0
}
)
]
mx_benchmark.add_bm_suite(SpecJbb2015BenchmarkSuite())
class JMHRunnerGraalCoreBenchmarkSuite(mx_benchmark.JMHRunnerBenchmarkSuite):
def name(self):
return "jmh-graal-core-whitebox"
def group(self):
return "Graal"
def subgroup(self):
return "graal-compiler"
def extraVmArgs(self):
return ['-XX:-UseJVMCIClassLoader'] + super(JMHRunnerGraalCoreBenchmarkSuite, self).extraVmArgs()
mx_benchmark.add_bm_suite(JMHRunnerGraalCoreBenchmarkSuite())
class JMHJarGraalCoreBenchmarkSuite(mx_benchmark.JMHJarBenchmarkSuite):
def name(self):
return "jmh-jar"
def group(self):
return "Graal"
def subgroup(self):
return "graal-compiler"
mx_benchmark.add_bm_suite(JMHJarGraalCoreBenchmarkSuite())
class RenaissanceBenchmarkSuite(mx_benchmark.JavaBenchmarkSuite):
"""Renaissance benchmark suite implementation.
"""
def name(self):
return "renaissance"
def group(self):
return "Graal"
def subgroup(self):
return "graal-compiler"
def renaissancePath(self):
renaissance = mx.get_env("RENAISSANCE")
if renaissance:
return join(renaissance, "jars")
return None
def validateEnvironment(self):
if not self.renaissancePath():
raise RuntimeError(
"The RENAISSANCE environment variable was not specified.")
def before(self, bmSuiteArgs):
self.workdir = mkdtemp(prefix='renaissance-work.', dir='.')
def validateReturnCode(self, retcode):
return retcode == 0
def workingDirectory(self, benchmarks, bmSuiteArgs):
return self.workdir
def classpathAndMainClass(self):
mainClass = "org.scala.RenaissanceSuite"
return ["-cp", self.renaissancePath() + "/*", mainClass]
def createCommandLineArgs(self, benchmarks, bmSuiteArgs):
benchArg = ""
if benchmarks is None:
benchArg = "all"
elif len(benchmarks) == 0:
mx.abort("Must specify at least one benchmark.")
else:
benchArg = ",".join(benchmarks)
vmArgs = self.vmArgs(bmSuiteArgs)
runArgs = self.runArgs(bmSuiteArgs)
return (
vmArgs + self.classpathAndMainClass() + runArgs + [benchArg])
def benchmarkList(self, bmSuiteArgs):
self.validateEnvironment()
out = mx.OutputCapture()
mx.run_java(self.classpathAndMainClass() + ["listraw"], out=out)
return str.splitlines(out.data)
def successPatterns(self):
return []
def failurePatterns(self):
return []
def flakySuccessPatterns(self):
return []
def rules(self, out, benchmarks, bmSuiteArgs):
return [
mx_benchmark.StdOutRule(
r"====== (?P<benchmark>[a-zA-Z0-9_]+) \((?P<benchgroup>[a-zA-Z0-9_]+)\), iteration (?P<iteration>[0-9]+) completed \((?P<value>[0-9]+(.[0-9]*)?) ms\) ======",
{
"benchmark": ("<benchmark>", str),
"vm": "jvmci",
"config.name": "default",
"metric.name": "warmup",
"metric.value": ("<value>", float),
"metric.unit": "ms",
"metric.type": "numeric",
"metric.score-function": "id",
"metric.better": "lower",
"metric.iteration": ("<iteration>", int),
}
),
mx_benchmark.StdOutRule(
r"====== (?P<benchmark>[a-zA-Z0-9_]+) \((?P<benchgroup>[a-zA-Z0-9_]+)\), final iteration completed \((?P<value>[0-9]+(.[0-9]*)?) ms\) ======",
{
"benchmark": ("<benchmark>", str),
"vm": "jvmci",
"config.name": "default",
"metric.name": "time",
"metric.value": ("<value>", float),
"metric.unit": "ms",
"metric.type": "numeric",
"metric.score-function": "id",
"metric.better": "lower",
"metric.iteration": 0,
}
)
]
mx_benchmark.add_bm_suite(RenaissanceBenchmarkSuite())
| (code above) | zapster/graal-core | mx.graal-core/mx_graal_benchmark.py | Python | gpl-2.0 | 39,136 | ["VisIt"] | dc82b509fb921a9f8a8d625875eb2065c6efb95ce6b36e0fca2bd2f7c0f03a1b |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
********************************
**espresso.analysis.Velocities**
********************************
"""
from espresso.esutil import cxxinit
from espresso import pmi
from espresso.analysis.Observable import *
from _espresso import analysis_Velocities
class VelocitiesLocal(ObservableLocal, analysis_Velocities):
'The (local) storage of particle velocities.'
def __init__(self, system):
cxxinit(self, analysis_Velocities, system)
def gather(self):
return self.cxxclass.gather(self)
def clear(self):
return self.cxxclass.clear(self)
def __iter__(self):
return self.cxxclass.all(self).__iter__()
if pmi.isController :
class Velocities(Observable):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.analysis.VelocitiesLocal',
pmicall = [ "gather", "clear" ],
localcall = ["getNParticles", "getCoordinates",
"__getitem__", "__iter__", "all"],
pmiproperty = ["capacity", "size"]
)
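# Hedged usage sketch (assumes a configured ESPResSo++ `system` object and that
# this module is reachable as espresso.analysis.Velocities):
#
#     vel = espresso.analysis.Velocities(system)
#     vel.gather()     # collect the current particle velocities via PMI
#     vel.clear()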
| (code above) | BackupTheBerlios/espressopp | src/analysis/Velocities.py | Python | gpl-3.0 | 1,919 | ["ESPResSo"] | cc066eaecc8df0fdc4b00e7edf2ba323eb08de86c4124d40c4cf9bbfe8307d42 |
import scipy
import scipy.interpolate
import numpy
import os
import sys
import SpectralTools
import AstroUtils
import astropy.io.fits as pyfits
import time
import Moog960
class MARCS_Atmosphere( object ):
def __init__(self, df):
data = open(df, 'r')
model = data.readlines()
self.Teff = int(float(model[1].split()[0]))
self.G = numpy.log10(float(model[3].split()[0]))
self.mt = float(model[4].split()[0])
junk = model[6].split()
self.FeH = float(junk[0])
self.Alpha = float(junk[1])
self.nlayers = int(model[22].split()[0])
n = []
lgTauR = []
lgTau5 = []
T = []
Pe = []
Ne = []
Pg = []
Prad = []
Pturb = []
KappaRoss = []
Density = []
Mu = []
Vconv = []
Fconv_F = []
RHOX = []
k = 1.380648e-16
for i in range(self.nlayers):
line = model[25+i].split()
n.append(int(line[0]))
lgTauR.append(float(line[1]))
lgTau5.append(float(line[2]))
T.append(float(line[4]))
Pe.append(float(line[5]))
Ne.append(Pe[-1]/(k*T[-1]))
Pg.append(float(line[6]))
Prad.append(float(line[7]))
Pturb.append(float(line[8]))
line = model[26+self.nlayers+i].split()
KappaRoss.append(float(line[2]))
Density.append(float(line[3]))
Mu.append(float(line[4]))
Vconv.append(float(line[5]))
Fconv_F.append(float(line[6]))
RHOX.append(float(line[7]))
self.n = numpy.array(n)
self.lgTauR = numpy.array(lgTauR)
self.lgTau5 = numpy.array(lgTau5)
self.T = numpy.array(T)
self.Pe = numpy.array(Pe)
self.Ne = numpy.array(Ne)
self.Pg = numpy.array(Pg)
self.Prad = numpy.array(Prad)
self.Pturb = numpy.array(Pturb)
self.KappaRoss = numpy.array(KappaRoss)
self.Density = numpy.array(Density)
self.Mu = numpy.array(Mu)
self.Vconv = numpy.array(Vconv)
self.Fconv_F = numpy.array(Fconv_F)
self.RHOX = numpy.array(RHOX)
def write_BEGN(self, outdir=''):
self.filename = 'MARCS_T'+str(self.Teff)+'_G%.2f'% self.G+'_M%.2f' % self.FeH +'_t%.2f'%self.mt+'.md'
f = open(outdir+self.filename, 'w')
f.write('BEGN\n')
f.write('MARCS T=%d Log g=%.2f Fe/H=%.2f mturb=%.1f\n' % (self.Teff, self.G, self.FeH, self.mt))
f.write(' %d\n' % self.nlayers)
for i in range(self.nlayers):
f.write('%.1f %.1f %.2f %.5E %f %f\n' % (self.lgTauR[i], self.T[i], self.Pg[i], self.Ne[i], self.Mu[i],
self.KappaRoss[i]))
f.write('%d\n' % float(self.mt*100000.0))
f.write('natoms 0 %.2f\n' % self.FeH)
f.write('nmol 12\n')
f.write(' 101. 106. 107. 108. 606. 607. 608.\n')
f.write(' 707. 708. 808. 10108. 60808.\n')
f.close()
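# Hedged usage sketch for the class above (the model filename is hypothetical and
# must point to a plain-text MARCS model in the layout parsed by __init__):
#
#     marcs = MARCS_Atmosphere('p5000_g+4.5_m0.0_t01_st_z+0.00.mod')
#     marcs.write_BEGN(outdir='./')   # writes a MoogStokes-readable 'BEGN' model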
class Atmosphere( object ):
def __init__(self, df):
data = open(df, 'r')
junk = data.readline()
coords = data.readline().split()
self.Teff = int(coords[1][2:])
self.G = float(coords[2][2:])
self.F = float(coords[3][4:])
self.m = float(coords[4][3:])
self.nlayers = int(data.readline().split()[0])
self.tauref = numpy.zeros(self.nlayers)
self.T = numpy.zeros(self.nlayers)
self.Theta = numpy.zeros(self.nlayers)
self.Tkev = numpy.zeros(self.nlayers)
self.Tlog = numpy.zeros(self.nlayers)
self.pgas = numpy.zeros(self.nlayers)
self.ne = numpy.zeros(self.nlayers)
self.molweight = numpy.zeros(self.nlayers)
self.kaprefmass = numpy.zeros(self.nlayers)
self.rho = numpy.zeros(self.nlayers)
self.kapref = numpy.zeros(self.nlayers)
self.mt = 0.0
for i in range(self.nlayers):
layer = data.readline().split()
self.tauref[i] = float(layer[0])
self.T[i] = float(layer[1])
self.Theta[i] = 5040./self.T[i]
self.Tkev[i] = 8.6171e-5*self.T[i]
self.Tlog[i] = numpy.log10(self.T[i])
self.pgas[i] = float(layer[2])
self.ne[i] = float(layer[3])
self.molweight[i] = float(layer[4])
self.kaprefmass[i] = float(layer[5])
self.rho[i] = self.pgas[i]*self.molweight[i]*1.6606e-24/(1.38054e-16*self.T[i])
self.kapref[i] = self.kaprefmass[i] * self.rho[i]
self.mt = float(data.readline().split()[0])
data.close()
class progressBar( object ):
def __init__(self, start, stop):
self.start_value = start
self.stop_value = stop
def start(self):
self.currentValue = self.start_value
self.numBlocks = 0
print("Starting Synthesis!")
def update(self, value):
self.currentValue = value
self.percentComplete = (self.currentValue-self.start_value)/(self.stop_value
-self.start_value)*100
if int(self.percentComplete/5) > self.numBlocks:
sys.stdout.write("\r%.2f [%s%s] %.2f - %.2f" % (self.start_value,
'#'*int(self.percentComplete/5),' '*(20-int(self.percentComplete/5)),
self.stop_value, self.currentValue))
sys.stdout.flush()
self.numBlocks = int(self.percentComplete/5)
class periodicTable( object ):
def __init__(self):
self.Zsymbol_table = {}
self.DissE_table = {}
df = open(os.path.dirname(os.path.realpath(__file__))+'/MOOGConstants.dat', 'r')
for line in df.readlines():
l = line.split('-')
self.Zsymbol_table[int(l[0])] = l[1].strip()
self.Zsymbol_table[l[1].strip()] = int(l[0])
if len(l) > 3:
self.DissE_table[int(l[0])] = float(l[3])
self.DissE_table[l[1].strip()] = float(l[3])
df.close()
def translate(self, ID):
retval = self.Zsymbol_table[ID]
return retval
def DissE(self, ID):
retval = self.DissE_table[ID]
return retval
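# Hedged example for the lookup table above (assuming MOOGConstants.dat lists the
# usual element symbols, so that e.g. Z=26 maps to 'Fe' and back):
#
#     pt = periodicTable()
#     pt.translate(26)     # -> 'Fe'
#     pt.translate('Fe')   # -> 26
#     pt.DissE('CO')       # dissociation energy, if CO is listed in the table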
class HITRAN_Dictionary( object ):
def __init__(self):
# HITRAN codes:
# 5 : CO
# 13 : OH
self.isotopes = {5:{1:608.01216,2:608.01316,3:608.01218,4:608.01217,5:608.01318,6:608.01317},
13:{1:108.01160,2:108.01180,3:108.02160}}
self.DissE = {5:11.10, 13:4.412}
class MoogStokes( object ):
def __init__(self, configurationFile, fileBase="moogstokes", **kwargs):
"""
MoogStokes: A python wrapper for MoogStokes
Input:
configurationFile : The name of a file containing configuration parameters
fileBase : the base name for the MoogStokes parameter file/linelists
Member variables
config - a dictionary created from lines contained in the configuration file
lineList - a LineList object containing lines within the defined wavelength range
parameterFile - a ParameterFile object useful for writing Moog-Readable parameter files
T - Temperature in Kelvin
logg - Surface gravity
B - Magnetic field strength in kG
fileName - name of the MoogStokes parameter file
fileBase - base of the filename
wave - array containing wavelengths of the spectrum
flux - array containing the flux of the composite emergent spectrum
"""
self.config = AstroUtils.parse_config(configurationFile)
if "MODELFILE" in kwargs.keys():
self.modelFile = kwargs["MODELFILE"]
self.T = 0.0
self.logg = 0.0
self.B = 0.0
elif "modelFile" in self.config.keys():
self.modelFile = self.config["modelFile"]
self.T = 0.0
self.logg = 0.0
self.B = 0.0
else:
self.T = self.config["Teff"]
self.logg = self.config["logg"]
self.B = self.config["Bfield"]
self.modelFile = None
if 'metallicity' in self.config.keys():
self.metallicity = self.config['metallicity']
else:
self.metallicity = 0.0
if 'mturb' in self.config.keys():
self.mturb = self.config['mturb']
else:
self.mturb = 1.0
if "diskInt" in kwargs.keys():
self.diskInt = kwargs["diskInt"]
elif "diskInt" in self.config.keys():
self.diskInt = self.config["diskInt"]
else:
self.diskInt = "BEACHBALL"
if self.diskInt == "TENNISBALL":
self.LineListFormat="MOOGSCALAR"
else:
self.LineListFormat = "MOOGSTOKES"
if "MoogSandbox" in self.config.keys():
self.MoogSandbox = self.config["MoogSandbox"]
else:
self.MoogSandbox = ''
if "wlStart" in kwargs.keys():
self.config["wlStart"] = kwargs["wlStart"]
if "wlStop" in kwargs.keys():
self.config["wlStop"] = kwargs["wlStop"]
self.fileName = self.MoogSandbox+fileBase+'.par'
self.fileBase = fileBase
self.lineList = LineList(self, self.config)
self.parameterFile = ParameterFile(self, self.config)
self.Spectra = []
self.logtau = []
if "moogInstance" in kwargs.keys():
MoogInstance = kwargs["moogInstance"].upper()
if MoogInstance == "ALPHA":
import MoogStokesPy_Alpha
self.MoogPy = MoogStokesPy_Alpha
elif MoogInstance == "BRAVO":
import MoogStokesPy_Bravo
self.MoogPy = MoogStokesPy_Bravo
elif MoogInstance == "CHARLIE":
import MoogStokesPy_Charlie
self.MoogPy = MoogStokesPy_Charlie
elif MoogInstance == "DELTA":
import MoogStokesPy_Delta
self.MoogPy = MoogStokesPy_Delta
else:
import MoogStokesPy
self.MoogPy = MoogStokesPy
self.MoogPy.charstuff.moogpath = '%-60s'%os.environ.get('MOOGSTOKESSOURCE')
self.MoogPy.recorder = self.recorder
self.MoogPy.stokesrecorder = self.stokesrecorder
self.MoogPy.beachball = self.beachball
self.MoogPy.diskoball = self.diskoball
self.MoogPy.fluxtracer = self.fluxtracer
self.MoogPy.tennisball = self.tennisball
if "progressBar" in kwargs.keys():
if kwargs["progressBar"] == True:
self.progressBar = progressBar(self.config["wlStart"], self.config["wlStop"])
else: self.progressBar = None
else:
self.progressBar = None
def fluxtracer(self, logtau, dtau, Stokes, continuum):
self.logtau.append(logtau)
self.flux_I.append(Stokes[0])
self.flux_Q.append(Stokes[1])
self.flux_U.append(Stokes[2])
self.flux_V.append(Stokes[3])
self.continuum.append(continuum)
def recorder(self, x, y):
self.Spectra[-1].wl.append(x)
self.Spectra[-1].flux_I.append(1.0-y)
def stokesrecorder(self, i, wave, Stokes, continuum):
index = i-1
self.Spectra[index].wl.append(wave)
self.Spectra[index].flux_I.append(Stokes[0])
self.Spectra[index].flux_Q.append(Stokes[1])
self.Spectra[index].flux_U.append(Stokes[2])
self.Spectra[index].flux_V.append(Stokes[3])
self.Spectra[index].continuum.append(continuum)
if i == 1:
if self.progressBar != None:
self.progressBar.update(wave)
def prepareFluxes(self):
for i in range(self.ncells):
header = pyfits.Header()
header.set('CREATION_TIME', time.ctime())
try:
header.set('CREATION_USER', os.getlogin())
except:
header.set('CREATION_USER', os.getenv('USER'))
header.set('CREATION_MACHINE', os.uname()[1])
header.set('MOOGVERSION', self.MoogPy.charstuff.moogversion.tostring())
header.set('WLSTART', self.config["wlStart"])
header.set('WLSTOP', self.config["wlStop"])
header.set('CELL', self.cells[i])
if self.diskInt == 'TENNISBALL':
header.set('SPECTRUM_TYPE', 'MOOG disk-integrated Spectrum')
self.Spectra.append(SpectralTools.Spectrum(wl = [],
I = [], header=header,
spectrum_type='MOOG DISK INTEGRATED'))
elif self.diskInt == 'BEACHBALL':
header.set('PHI_ANGLE', float('%.4f'% self.phi_angle[i]))
header.set('MU', float('%.4f'%self.mus[i]))
header.set('DISKFLAG', self.diskflag)
header.set('SPECTRUM_TYPE', 'BeachBall Emergent Spectrum')
self.Spectra.append(SpectralTools.Spectrum(wl = [],
I = [], Q = [], U = [], V = [], continuum = [], header=header,
spectrum_type='MOOG EMERGENT'))
elif self.diskInt == 'DISKOBALL':
header.set('PHI_ANGLE', float('%.4f'% self.phi_angle[i]))
header.set('CHI_ANGLE', float('%.4f'% self.chi_angle[i]))
header.set('AZIMUTH', float('%.4f' % self.azimuth[i]))
header.set('LONGITUDE', float('%.4f' % self.longitude[i]))
header.set('NRINGS', float('%d' % self.nrings))
header.set('INCLINATION', float('%.4f' % self.inclination))
header.set('POSANGLE', float('%.4f' % self.position_angle))
header.set('MU', float('%.4f'%self.mus[i]))
header.set('DISKFLAG', self.diskflag)
header.set('SPECTRUM_TYPE', 'DiskoBall Emergent Spectrum')
self.Spectra.append(SpectralTools.Spectrum(wl = [],
I = [], Q = [], U = [], V = [], continuum = [], header=header,
spectrum_type='MOOG EMERGENT'))
def tennisball(self):
self.ncells = 1
self.cells = numpy.arange(1)
self.LineListFormat = "MOOGSCALAR"
self.prepareFluxes()
def beachball(self):
self.diskflag = 1
self.ncells = 7
try:
self.deltav = self.config["deltav"]
except:
self.deltav = 0.1
self.cells = numpy.arange(7)
self.phi_angle = self.MoogPy.angles.phi_angle.copy()
self.mus = self.MoogPy.angles.mus.copy()
self.prepareFluxes()
def diskoball(self):
self.diskflag = 0
self.ncells = self.MoogPy.angles.ncells
self.nrings = self.MoogPy.angles.nrings
self.inclination = self.MoogPy.angles.inclination
self.position_angle = self.MoogPy.angles.position_angle
self.phi_angle = self.MoogPy.angles.phi_angle.copy()
self.chi_angle = self.MoogPy.angles.chi_angle.copy()
self.azimuth = self.MoogPy.angles.azimuth.copy()
self.longitude = self.MoogPy.angles.longitude.copy()
self.mus = self.MoogPy.angles.mus.copy()
self.prepareFluxes()
def finishSpectra(self):
for spectrum in self.Spectra:
spectrum.preserve(I=spectrum.flux_I!=None, Q=spectrum.flux_Q!=None,
U=spectrum.flux_U!=None, V=spectrum.flux_V!=None,
continuum=spectrum.continuum!=None)
def run(self, saveRaw=False, **kwargs):
self.lineList.setBfield(self.B)
self.lineList.writeLineLists(parent=self, mode=self.LineListFormat, **kwargs)
self.parameterFile.setName(self.fileBase)
self.parameterFile.setModel(teff=self.T, logg=self.logg,
modelFile=self.modelFile, metallicity=self.metallicity, mturb=self.mturb)
self.parameterFile.writeParFile()
self.MoogPy.charstuff.fparam = self.fileName.ljust(80)
self.MoogPy.atmos.linecount = 0
if self.progressBar != None:
self.progressBar.start()
self.MoogPy.moogstokessilent()
self.finishSpectra()
self.Phrase = Moog960.SyntheticPhrase(rawData=self.Spectra,
diskInt=self.diskInt)
if saveRaw:
filename = self.config["outdir"]+self.config["outbase"]+'_T%d_G%.2f_B%.2f_M%.2f_t%.2f_raw.fits'%(self.config["Teff"],
self.config["logg"], self.config["Bfield"], self.metallicity, self.mturb)
PHKWs = {"BFIELD":self.config["Bfield"], "TEFF":self.config["Teff"], "LOGG":self.config["logg"], "FEH":self.metallicity, "MTURB": self.mturb}
self.Phrase.saveRaw(filename=filename, primaryHeaderKWs=PHKWs)
def trace(self, save=False):
self.lineList.setBfield(self.B)
self.lineList.writeLineLists(parent=self, mode="MOOGSTOKES")
self.MoogPy.charstuff.fparam = self.fileName.ljust(80)
self.parameterFile.setName(self.fileBase)
self.parameterFile.setModel(teff = self.T, logg = self.logg, modelFile=self.modelFile,
metallicity=self.metallicity, mturb=self.mturb)
self.parameterFile.writeParFile()
self.MoogPy.atmos.linecount = 0
self.MoogPy.moogstokessilent()
if save:
out = pyfits.PrimaryHDU([self.logtau, self.flux_I, self.flux_Q, self.flux_U, self.flux_V, self.continuum])
out.header.set('WAVE', self.config["wlProbe"])
out.header.set('BFIELD', self.config["Bfield"])
out.header.set('TEFF', self.config["Teff"])
out.header.set('LOGG', self.config["logg"])
out.writeto(self.config["outdir"]+self.config["outbase"]+'_'+str(self.config["wlProbe"])+'.fits', clobber=True)
return self.logtau, self.flux_I, self.flux_Q, self.flux_U, self.flux_V, self.continuum
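# Hedged usage sketch for the wrapper above (the configuration file name is
# hypothetical and must define the keys read in __init__, e.g. Teff, logg,
# Bfield, wlStart/wlStop and the line-list settings):
#
#     moog = MoogStokes('synthesis.cfg', fileBase='example', progressBar=True)
#     moog.run(saveRaw=False)    # fills moog.Spectra with emergent spectra
#     print(len(moog.Spectra))   # one entry per disk cell (7 for BEACHBALL)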
class ParameterFile( object ):
def __init__(self, parent, config, **kwargs):
self.parent = parent
self.config = config
self.moogParCfgFile = config['moog_Parameters']
self.moogPars = AstroUtils.parse_config(self.moogParCfgFile)
self.synlimits = numpy.array([config['wlStart'], config['wlStop'],
self.moogPars['synlimits_c'], self.moogPars['synlimits_d']])
if "wlProbe" in self.config.keys():
self.wlProbe = self.config["wlProbe"]
else:
self.wlProbe = None
if "PARFILENAME" in kwargs:
self.parFileName = kwargs["PARFILENAME"]
else:
self.parFileName = self.moogPars['parFileName']
if "atmos_dir" in self.moogPars.keys():
atmos_dir = self.moogPars["atmos_dir"]
self.moogPars["atmos_dir"] = os.environ.get('MOOGPYDATAPATH')+atmos_dir
if 'model_type' in self.config.keys():
self.model_type = self.config['model_type']
else:
self.model_type = 'MARCS'
self.mode = self.moogPars['mode']
self.labels = {'terminal':'x11',
'strong':1,
'atmosphere':1,
'molecules':2,
'lines':0,
'damping':1,
'freeform':2,
'flux/int':0,
'diskflag':1,
'testflag':0}
self.file_labels = {'summary_out':'./Output/summary.out',
'standard_out':'./Output/out1',
'smoothed_out':'./Output/smoothed.out',
'model_in':'',
'lines_in':config['Weak_FileName'],
'stronglines_in':config['Strong_FileName']}
#'out_dir':'',
for l in self.labels:
if l in self.moogPars:
self.labels[l] = self.moogPars[l]
for fl in self.file_labels:
if fl in self.moogPars:
self.file_labels[fl] = self.moogPars[fl]
def setName(self, name):
self.file_labels['lines_in'] = self.parent.MoogSandbox + self.config['Weak_FileName']+'_'+name
self.file_labels['stronglines_in'] = self.parent.MoogSandbox + self.config['Strong_FileName']+'_'+name
self.parFileName = self.parent.MoogSandbox + name+'.par'
def setModel(self, teff=0.0, logg=0.0, modelFile=None, metallicity='0.0', mturb=1.0):
if modelFile==None:
if self.model_type == 'MARCS':
self.file_labels["model_in"] = os.environ.get('MOOGPYDATAPATH')+ \
'Atmospheres/MARCS/MARCS_T'+ str(int(teff))+'_G%.2f'% logg +\
'_M%.2f'%metallicity+'_t%.2f'%mturb+'.md'
print self.file_labels["model_in"]
elif self.model_type == 'BTSettl':
self.file_labels["model_in"] = os.environ.get('MOOGPYDATAPATH')+ \
'Atmopsheres/BTSettl/BTSettl_T'+str(int(teff))
else:
self.file_labels["model_in"] = os.environ.get('MOOGPYDATAPATH') + \
'Atmospheres/' + modelFile
def writeParFile(self):
pf = open(self.parFileName, 'w')
pf.write(self.mode+'\n')
for fl in self.file_labels:
pf.write(fl+' \''+self.file_labels[fl]+'\'\n')
for l in self.labels:
pf.write(l+' '+str(self.labels[l])+'\n')
if self.wlProbe:
pf.write('dipstick %.3f\n' % self.wlProbe)
pf.write('synlimits\n')
pf.write(' %.2f %.2f %.3f %.2f\n' %
(self.synlimits[0], self.synlimits[1],
self.synlimits[2], self.synlimits[3]))
pf.close()
class LineList( object ):
def __init__(self, parent, config):
# Load in configuration file
self.parent = parent
self.MoogPyDataPath = os.environ.get('MOOGPYDATAPATH')
self.strong_file = self.MoogPyDataPath+config['strong_file']
self.VALD_list = self.MoogPyDataPath+config['VALD_file']
self.gf_corrections = self.MoogPyDataPath+config['gf_file']
self.wlStart = config['wlStart']
self.wlStop = config['wlStop']
self.Bfield = config['Bfield']/10.0
self.sfn = parent.MoogSandbox + config['Strong_FileName']
self.wfn = parent.MoogSandbox + config['Weak_FileName']
self.applyCorrections = config['applyCorrections']
self.readInLineLists()
self.nStrong = len(self.strongLines)
self.numLines = self.nStrong+len(self.weakLines)
self.dummyLine = dummy_Line()
def setBfield(self, B):
self.Bfield = B/10.0
for i in range(self.nStrong):
self.strongLines[i].zeeman_splitting(B=self.Bfield)
for i in range(self.numLines - self.nStrong):
if not(self.weakLines[i].DissE):
self.weakLines[i].zeeman_splitting(B=self.Bfield)
def readInLineLists(self):
self.parse_new_VALD()
def parse_new_VALD(self):
pt = periodicTable()
if self.applyCorrections:
self.corrected = []
for line in open(self.gf_corrections, 'r'):
self.corrected.append(MOOG_Line(line))
strong = []
for line in open(self.strong_file, 'r'):
l = line.split()
strong.append([float(l[0]), float(l[1])])
vald_in = open(self.VALD_list, 'r')
l1 = ''
l2 = ''
l3 = ''
l4 = ''
self.strongLines = []
self.weakLines = []
junk = vald_in.readline()
junk = vald_in.readline()
linecounter = 0
lines = [l1, l2, l3, l4]
for line in vald_in:
lines[linecounter] = line
linecounter += 1
if linecounter == 4:
linecounter = 0
if (lines[0][0] == '\''):
wl = float(lines[0].split(',')[1])
if ( (wl > self.wlStart) & (wl < self.wlStop) ):
current_line = New_VALD_Line(lines, pt)
if ( (current_line.expot_lo < 20.0) &
(current_line.species % 1 <= 0.2) ):
if self.applyCorrections:
for cl in self.corrected:
if ((cl.wl == current_line.wl) &
(cl.expot_lo == current_line.expot_lo) &
(cl.loggf != -6.0) ):
current_line.loggf = cl.loggf
current_line.zeeman["NOFIELD"][1] = cl.loggf
if ((cl.VdW != 99.0) & (cl.species < 100.0)):
current_line.VdW = cl.VdW
current_line.stark = cl.stark
current_line.radiative = cl.radiative
current_line.zeeman_splitting(self.Bfield)
species = current_line.species
if ( [wl, species] in strong):
self.strongLines.append(current_line)
else:
self.weakLines.append(current_line)
def getWl(self, index):
if index < self.nStrong:
return self.strongLines[index].wl
else:
return self.weakLines[index-self.nStrong].wl
def getGf(self, index, log=False):
if index < self.nStrong:
if log:
return self.strongLines[index].loggf
else:
return 10.0**(self.strongLines[index].loggf)
else:
if log:
return self.weakLines[index-self.nStrong].loggf
else:
return 10.0**(self.weakLines[index-self.nStrong].loggf)
def getVdW(self, index, log=False):
if index < self.nStrong:
if log:
return self.strongLines[index].VdW
else:
return 10.0**(self.strongLines[index].VdW)
else:
if log:
return self.weakLines[index-self.nStrong].VdW
else:
return 10.0**(self.weakLines[index-self.nStrong].VdW)
def perturbGf(self, index, delta, push=False):
if index < self.nStrong:
self.strongLines[index].modifyGf(delta, push=push)
else:
self.weakLines[index-self.nStrong].modifyGf(delta, push=push)
def perturbVdW(self, index, delta, push=False):
if index < self.nStrong:
self.strongLines[index].modifyVdW(delta, push=push)
else:
self.weakLines[index-self.nStrong].modifyVdW(delta, push=push)
def saveLineList(self, mode="MOOGSCALAR", filename='', changed=False):
outfile = open(filename, 'w')
for strongLine in self.strongLines:
strongLine.dump(out=outfile, mode=mode, changed=changed)
for weakLine in self.weakLines:
weakLine.dump(out=outfile, mode=mode, changed=changed)
outfile.close()
self.sort_file(filename)
def writeLineLists(self, lineIndex=-1, partial=False, mode="MOOGSCALAR", parent=None):
if parent:
strongFile = self.sfn + '_' + parent.fileBase
weakFile = self.wfn + '_' + parent.fileBase
moogPointer = parent.MoogPy
else:
strongFile = self.sfn
weakFile = self.wfn
moogPointer = self.parent.MoogPy
self.parent.MoogPy.linex.start = self.wlStart
self.parent.MoogPy.linex.sstop = self.wlStop
self.parent.parameterFile.synlimits[0] = self.wlStart
self.parent.parameterFile.synlimits[1] = self.wlStop
if lineIndex < 0: #Normal case, write ALL the lines
outfile = open(strongFile, 'w')
for strongLine in self.strongLines:
strongLine.dump(out=outfile, mode=mode)
outfile.close()
self.sort_file(strongFile)
outfile = open(weakFile, 'w')
if len(self.weakLines) == 0:
self.dummyLine.create(self.strongLines[lineIndex].wl, outfile)
else:
for weakLine in self.weakLines:
weakLine.dump(out=outfile, mode=mode)
outfile.close()
self.sort_file(weakFile, weak=True)
""" This worked previously
self.parent.parameterFile.writeParFile()
self.parent.MoogPy.linex.start = self.wlStart
self.parent.MoogPy.linex.sstop = self.wlStop
self.parent.MoogPy.linex.nlines = len(self.strongLines)
self.parent.MoogPy.linex.nstrong = len(self.weakLines)
"""
moogPointer.linex.start = self.wlStart
moogPointer.linex.sstop = self.wlStop
moogPointer.linex.nlines = len(self.strongLines) #Do I need to count lines or components?
moogPointer.linex.nstrong = len(self.weakLines)
elif lineIndex < self.nStrong: # We want to only print out one line
# STRONG LINE
outfile = open(strongFile, 'w')
self.strongLines[lineIndex].dump(out=outfile, mode=mode)
outfile.close()
self.sort_file(strongFile)
outfile = open(weakFile, 'w')
self.dummyLine.create(self.strongLines[lineIndex].wl, outfile)
outfile.close()
self.sort_file(weakFile, weak=True)
# Set Moog varibles
self.parent.MoogPy.linex.nlines = 1
self.parent.MoogPy.linex.nstrong = 1
else:
# WEAK LINE
index = lineIndex-self.nStrong
# Write empty strong line file
outfile = open(strongFile, 'w')
outfile.close()
weakLine = self.weakLines[index]
wlStart = weakLine.wl - 3.0
wlStop = weakLine.wl + 3.0
if partial:
self.parent.parameterFile.synlimits[0] = wlStart
self.parent.parameterFile.synlimits[1] = wlStop
self.parent.MoogPy.linex.start = wlStart
self.parent.MoogPy.linex.sstop = wlStop
#self.parent.parameterFile.writeParFile()
#print self.parent.MoogPy.linex.start
#print self.parent.parameterFile.synlimits
#raw_input()
#self.parent.parameterFile.synlimits[0] = self.wlStart
#self.parent.parameterFile.synlimits[1] = self.wlStop
self.parent.MoogPy.linex.nlines = 1
self.parent.MoogPy.linex.nstrong = 0
outfile = open(weakFile, 'w')
if weakLine.loggf > 0:
self.dummyLine.create(weakLine.wl+0.01, outfile)
weakLine.dump(out=outfile, mode=mode)
outfile.close()
self.sort_file(weakFile, weak=True)
def sort_file(self, name, weak=False):
data = open(name, 'r').readlines()
wl = []
for line in data:
wl.append(float(line[0:10]))
order = numpy.argsort(wl)
out = open(name, 'w')
if weak:
out.write("Weak Lines\n")
for i in order:
out.write(data[i])
out.close()
def applyCorrection(self, corrections):
#"""
for i in range(self.numLines):
if i < self.nStrong:
self.strongLines[i].modifyGf(corrections[i], push=True)
self.strongLines[i].modifyVdW(corrections[i+self.numLines], push=True)
else:
self.weakLines[i-self.nStrong].modifyGf(corrections[i], push=True)
self.weakLines[i-self.nStrong].modifyVdW(corrections[i+self.numLines], push=True)
self.writeLineLists(parent=self.parent)
def setLogGfs(self, loggfs):
for i in range(self.numLines):
if i < self.nStrong:
self.strongLines[i].setLogGf(loggfs[i])
else:
self.weakLines[i-self.nStrong].setLogGf(loggfs[i])
def setVdWs(self, VdWs):
for i in range(self.numLines):
if i < self.nStrong:
self.strongLines[i].setVdW(VdWs[i])
else:
self.weakLines[i-self.nStrong].setVdW(VdWs[i])
def tossLines(self, indices):
nTossed = 0
for index in indices:
index -= nTossed
if index < self.nStrong:
junk = self.strongLines.pop(index)
self.nStrong -= 1
else:
junk = self.weakLines.pop(index-self.nStrong)
nTossed+=1
self.numLines -= 1
class Spectral_Line( object ):
def __init__(self):
self.wl = None
self.species = None
self.expot_lo = None
self.loggf = None
self.DissE = None
self.VdW = None
self.loggfHistory = []
self.VdWHistory = []
self.radiative = None
self.stark = None
self.zeeman = {}
self.transition = None
self.J_lo = None
self.J_hi = None
self.g_lo = None
self.g_hi = None
self.g_eff = None
self.verbose = False
self.Bfield = 0.0
def __str__(self):
return "%.1f line at %.4f: log gf = %.3f, VdW = %.3f" % (self.species,
self.wl, self.loggf, self.VdW)
def __eq__(self, other):
return (self.wl == other.wl) & (self.species ==
other.species) & (self.expot_lo == other.expot_lo)
def modifyGf(self, delta_loggf, push=False):
if push:
self.loggfHistory.append(self.loggf)
self.loggf = numpy.log10(10.0**self.loggf + delta_loggf)
if numpy.isnan(self.loggf):
self.loggf = -6.0
self.zeeman_splitting()
def modifyVdW(self, delta_VdW, push=False):
if not(self.VdW):
self.VdW = -7.5 # No damping value currently, try a guess
if push:
self.VdWHistory.append(self.VdW)
self.VdW += delta_VdW
if self.VdW >= -5.0:
self.VdW = -4.999
if self.VdW < -9.5:
self.VdW = -9.5
def setLogGf(self, loggf):
self.loggf = loggf
self.zeeman_splitting()
def setVdW(self, VdW):
self.VdW = VdW
def dump(self, **kwargs):
if "changed" in kwargs:
if kwargs["changed"] == True:
if ((len(self.loggfHistory) == 0) & (len(self.VdWHistory) == 0)):
return
if (self.VdW == -9.5):
return
if (self.loggf == -6.0):
return
if "out" in kwargs:
out = kwargs["out"]
if kwargs["mode"].upper() == 'MOOGSTOKES':
if( (self.expot_lo < 20.0) & (self.species % 1 <= 0.2)):
if not(self.DissE):
for i in range(len(self.zeeman["PI"][0])):
out.write('%10.3f%10s%10.3f%10.5f' %
(self.zeeman["PI"][0][i],
self.species,self.expot_lo,self.zeeman["PI"][1][i]))
if not(self.VdW):
out.write('%20s%20.3f'% (' ',0.0))
else:
out.write('%10.3f%20s%10.3f' %
(self.VdW, ' ', 0.0))
if not(self.radiative):
out.write('%10.3s'% (' '))
else:
out.write('%10.3f' %
(self.radiative))
if not(self.stark):
out.write('%10s\n'% (' '))
else:
out.write('%10.3f\n' %
(self.stark))
for i in range(len(self.zeeman["LCP"][0])):
out.write('%10.3f%10s%10.3f%10.5f' %
(self.zeeman["LCP"][0][i],
self.species,self.expot_lo,self.zeeman["LCP"][1][i]))
if not(self.VdW):
out.write('%20s%20.3f'% (' ',-1.0))
else:
out.write('%10.3f%20s%10.3f' %
(self.VdW, ' ', -1.0))
if not(self.radiative):
out.write('%10.3s'% (' '))
else:
out.write('%10.3f' %
(self.radiative))
if not(self.stark):
out.write('%10s\n'% (' '))
else:
out.write('%10.3f\n' %
(self.stark))
for i in range(len(self.zeeman["RCP"][0])):
out.write('%10.3f%10s%10.3f%10.5f' %
(self.zeeman["RCP"][0][i],
self.species,self.expot_lo,self.zeeman["RCP"][1][i]))
if not(self.VdW):
out.write('%20s%20.3f'% (' ',1.0))
else:
out.write('%10.3f%20s%10.3f' %
(self.VdW, ' ', 1.0))
if not(self.radiative):
out.write('%10.3s'% (' '))
else:
out.write('%10.3f' %
(self.radiative))
if not(self.stark):
out.write('%10s\n'% (' '))
else:
out.write('%10.3f\n' %
(self.stark))
else:
#RCP
out.write('%10.3f%10.5f%10.3f%10.3f' %
(self.wl, self.species, self.expot_lo,self.loggf))
if not(self.VdW):
out.write('%10s%10.3f%20.3f' %
(' ',self.DissE, 1.0))
else:
out.write('%10.3f%10.3f%20.3f' %
(self.VdW, self.DissE, 1.0))
if not(self.radiative):
out.write('%10.3s'% (' '))
else:
out.write('%10.3f' %
(self.radiative))
if not(self.stark):
out.write('%10s\n'% (' '))
else:
out.write('%10.3f\n' %
(self.stark))
#PI
out.write('%10.3f%10.5f%10.3f%10.3f' %
(self.wl, self.species, self.expot_lo,self.loggf))
if not(self.VdW):
out.write('%10s%10.3f%20.3f' %
(' ',self.DissE, 0.0))
else:
out.write('%10.3f%10.3f%20.3f' %
(self.VdW, self.DissE, 0.0))
if not(self.radiative):
out.write('%10.3s'% (' '))
else:
out.write('%10.3f' %
(self.radiative))
if not(self.stark):
out.write('%10s\n'% (' '))
else:
out.write('%10.3f\n' %
(self.stark))
#LCP
out.write('%10.3f%10.5f%10.3f%10.3f' %
(self.wl, self.species, self.expot_lo,self.loggf))
if not(self.VdW):
out.write('%10s%10.3f%20.3f' %
(' ',self.DissE, -1.0))
else:
out.write('%10.3f%10.3f%20.3f' %
(self.VdW, self.DissE, -1.0))
if not(self.radiative):
out.write('%10.3s'% (' '))
else:
out.write('%10.3f' %
(self.radiative))
if not(self.stark):
out.write('%10s\n'% (' '))
else:
out.write('%10.3f\n' %
(self.stark))
elif kwargs["mode"].upper() == "MOOGSCALAR":
if( (self.expot_lo < 20.0) & (self.species % 1 <= 0.2)):
if not(self.DissE):
out.write('%10.3f%10s%10.3f%10.5f' %
(self.zeeman["NOFIELD"][0],
self.species,self.expot_lo,
self.zeeman["NOFIELD"][1]))
if not(self.VdW):
out.write('%40s'% (' '))
else:
out.write('%10.3f%30s' %
(self.VdW, ' '))
if not(self.radiative):
out.write('%10.3s'% (' '))
else:
out.write('%10.3f' %
(self.radiative))
if not(self.stark):
out.write('%10s\n'% (' '))
else:
out.write('%10.3f\n' %
(self.stark))
else:
out.write('%10.3f%10.5f%10.3f%10.3f' %
(self.wl, self.species, self.expot_lo,self.loggf))
if not(self.VdW):
out.write('%10s%10.3f%20s' %
(' ',self.DissE, ' '))
else:
out.write('%10.3f%10.3f%20s' %
(self.VdW, self.DissE, ' '))
if not(self.radiative):
out.write('%10.3s'% (' '))
else:
out.write('%10.3f' %
(self.radiative))
if not(self.stark):
out.write('%10s\n'% (' '))
else:
out.write('%10.3f\n' %
(self.stark))
def zeeman_splitting(self, B=None, **kwargs):
if B:
self.Bfield = B
self.zeeman["NOFIELD"] = [self.wl, self.loggf]
self.compute_zeeman_transitions(**kwargs)
wl = []
lgf = []
for transition in self.pi_transitions:
if (transition.weight > 0):
wl.append(transition.wavelength)
lgf.append(numpy.log10(transition.weight*
10.0**(self.loggf)))
self.zeeman["PI"] = [numpy.array(wl), numpy.array(lgf)]
wl = []
lgf = []
for transition in self.lcp_transitions:
if (transition.weight > 0):
wl.append(transition.wavelength)
lgf.append(numpy.log10(transition.weight*
10.0**(self.loggf)))
self.zeeman["LCP"] = [numpy.array(wl), numpy.array(lgf)]
wl = []
lgf = []
for transition in self.rcp_transitions:
if (transition.weight > 0):
wl.append(transition.wavelength)
lgf.append(numpy.log10(transition.weight*
10.0**(self.loggf)))
self.zeeman["RCP"] = [numpy.array(wl), numpy.array(lgf)]
def compute_zeeman_transitions(self, **kwargs):
B = self.Bfield
bohr_magneton = 5.78838176e-5 #eV*T^-1
hc = 12400 #eV*Angstroms
lower_energies = {}
upper_energies = {}
for mj in self.lower.mj:
lower_energies[mj]=self.lower.E+mj*self.lower.g*bohr_magneton*B
for mj in self.upper.mj:
upper_energies[mj] = self.upper.E+mj*self.upper.g*bohr_magneton*B
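        # Each J level splits into 2J+1 magnetic sublevels with
        #   E(m_J) = E_0 + m_J * g * mu_B * B,
        # so each Zeeman component's wavelength is hc / (E_upper - E_lower).
        # The weights below are the standard electric-dipole relative strengths
        # for delta J = 0, +1, -1.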
pi_transitions = []
lcp_transitions = []
rcp_transitions = []
pi_weight = 0.0
lcp_weight = 0.0
rcp_weight = 0.0
delta_J = self.upper.J - self.lower.J
J1 = self.lower.J
self.geff = (0.5*(self.lower.g+self.upper.g)
+0.25*(self.lower.g-self.upper.g)*(self.lower.J*(self.lower.J+1)-
self.upper.J*(self.upper.J+1.0)))
for mj in lower_energies.keys():
if (delta_J == 0.0):
                if (mj+1.0) in upper_energies: #delta Mj = +1 sigma component
weight = (J1-mj)*(J1+mj+1.0)
rcp_transitions.append(zeemanTransition(hc/
(upper_energies[mj+1]-lower_energies[mj]), weight,
mj+1, mj))
rcp_weight+=weight
                if mj in upper_energies: #delta Mj = 0 Pi component
weight = mj**2.0
pi_transitions.append(zeemanTransition(hc/
(upper_energies[mj]-lower_energies[mj]), weight,
mj, mj))
pi_weight+=weight
                if (mj-1.0) in upper_energies: #delta Mj = -1 sigma component
weight = (J1+mj)*(J1-mj+1.0)
lcp_transitions.append(zeemanTransition(hc/
(upper_energies[mj-1]-lower_energies[mj]), weight,
mj-1, mj))
lcp_weight+=weight
if (delta_J == 1.0):
                if (mj+1.0) in upper_energies: #delta Mj = +1 sigma component
weight = (J1+mj+1.0)*(J1+mj+2.0)
rcp_transitions.append(zeemanTransition(hc/
(upper_energies[mj+1]-lower_energies[mj]), weight,
mj+1, mj))
rcp_weight+=weight
                if mj in upper_energies: #delta Mj = 0 Pi component
weight = (J1+1.0)**2.0 - mj**2.0
pi_transitions.append(zeemanTransition(hc/
(upper_energies[mj]-lower_energies[mj]), weight,
mj, mj))
pi_weight+=weight
                if (mj-1.0) in upper_energies: #delta Mj = -1 sigma component
weight = (J1-mj+1.0)*(J1-mj+2.0)
lcp_transitions.append(zeemanTransition(hc/
(upper_energies[mj-1]-lower_energies[mj]), weight,
mj-1, mj))
lcp_weight+=weight
if (delta_J == -1.0):
                if (mj+1.0) in upper_energies: #delta Mj = +1 sigma component
weight = (J1-mj)*(J1-mj-1.0)
rcp_transitions.append(zeemanTransition(hc/
(upper_energies[mj+1]-lower_energies[mj]), weight,
mj+1, mj))
rcp_weight+=weight
                if mj in upper_energies: #delta Mj = 0 Pi component
weight = J1**2.0 - mj**2.0
pi_transitions.append(zeemanTransition(hc/
(upper_energies[mj]-lower_energies[mj]), weight,
mj, mj))
pi_weight+=weight
                if (mj-1.0) in upper_energies: #delta Mj = -1 sigma component
weight = (J1+mj)*(J1+mj-1.0)
lcp_transitions.append(zeemanTransition(hc/
(upper_energies[mj-1]-lower_energies[mj]), weight,
mj-1, mj))
lcp_weight+=weight
for transition in rcp_transitions:
transition.weight /= rcp_weight
for transition in lcp_transitions:
transition.weight /= lcp_weight
for transition in pi_transitions:
transition.weight /= pi_weight
self.pi_transitions = pi_transitions
self.lcp_transitions = lcp_transitions
self.rcp_transitions = rcp_transitions
class dummy_Line( Spectral_Line ):
def __init__(self):
self.wl = 0.0
self.species = 26.0
self.element = 26.0
self.ionization = 0.0
self.loggf = -5.5
self.expot_lo = 7.8
self.Bfield = 0
self.VdW = None
self.radiative =None
self.stark = None
self.DissE = None
self.zeeman = {}
self.zeeman["NOFIELD"] = [self.wl,self.loggf]
def create(self, wl, outfile):
self.wl = wl
self.zeeman["NOFIELD"][0] = wl
self.dump(out=outfile, mode='MOOGSCALAR')
class MOOG_Line( Spectral_Line ):
def __init__(self, line, **kwargs):
self.wl = float(line[0:11])
self.species = float(line[10:21])
self.element = numpy.round(self.species)
self.ionization = (self.species - self.element)*10.0
self.loggf = float(line[30:41])
self.loggfHistory = []
self.VdWHistory = []
self.expot_lo = float(line[20:31])
self.Bfield = 0.0
self.zeeman = {}
self.zeeman["NOFIELD"] = [self.wl, self.loggf]
try:
self.VdW = float(line[40:51])
except:
self.VdW = None
try:
self.radiative = float(line[80:91])
except:
self.radiative = None
try:
self.stark = float(line[90:101])
except:
self.stark = None
try:
self.DissE = float(line[50:61])
except:
self.DissE = None
class VALD_Line( Spectral_Line ):
def __init__(self, line1, line2='', pt='', **kwargs):
l1 = line1.split(',')
l2 = line2.split()
self.element = pt.translate(l1[0].strip('\'').split()[0])
self.ionization = int(l1[0].strip('\'').split()[1])-1
self.species = self.element + self.ionization/10.0
self.wl = float(l1[1])
self.loggf = float(l1[2])
self.expot_lo = float(l1[3])
self.J_lo = float(l1[4])
self.expot_hi = float(l1[5])
self.J_hi = float(l1[6])
self.g_lo = float(l1[7])
self.g_hi = float(l1[8])
self.g_eff = float(l1[9])
self.radiative = float(l1[10])
self.stark = float(l1[11])
self.VdW = float(l1[12])
self.loggfHistory = []
self.VdWHistory = []
self.DissE = None
self.transition = line2.strip().strip('\'')
self.Bfield = 0.0
if "verbose" in kwargs:
self.verbose = kwargs["verbose"]
else:
self.verbose = False
if (self.g_lo == 99.0):
if not (self.species in [70.1, 25.2]):
angmom = {"S":0, "P":1, "D":2, "F":3, "G":4, "H":5,
"I":6, "K":7, "L":8, "M":9}
n = 0
try:
for char in self.transition:
if char.isdigit():
S = (float(char)-1.0)/2.0
if ((char.isupper()) & (n < 2)):
n+=1
L = angmom[char]
if n == 1:
if (self.J_lo > 0.0):
self.g_lo = (1.5+(S*(S+1.0)-L*(L+1))/
(2*self.J_lo*(self.J_lo+1)))
else:
self.g_lo = 0.0
else:
if (self.J_hi > 0.0):
self.g_hi = (1.5+(S*(S+1.0)-L*(L+1))/
(2*self.J_hi*(self.J_hi+1)))
else:
self.g_hi = 0.0
except:
self.g_lo = 0.0
self.g_hi = 0.0
if self.verbose:
print("Parsing VALD Transition Failed! %f" % self.wl)
print("%s\n" % self.transition)
else:
self.g_lo = 0.0
self.g_hi = 0.0
self.lower = Energy_Level(self.J_lo, self.g_lo, self.expot_lo)
self.upper = Energy_Level(self.J_hi, self.g_hi,
self.expot_lo+12400.0/self.wl)
self.zeeman = {}
self.zeeman["NOFIELD"] = [self.wl,self.loggf]
class New_VALD_Line( Spectral_Line ):
def __init__(self, lines, pt='', **kwargs):
l1 = lines[0].split(',')
self.ID = l1[0].strip('\'').split()
self.element = pt.translate(self.ID[0])
self.Bfield = 0.0
if "verbose" in kwargs:
self.verbose = kwargs["verbose"]
else:
self.verbose = False
if self.element > 1000:
isotopes = lines[3].strip().strip('\'').split()[-1].split('(')
A1 = int(isotopes[1].split(')')[0])
if len(isotopes) == 2:
A2 = 1
else:
A2 = int(isotopes[2].split(')')[0])
heavy = max(A1, A2)
light = min(A1, A2)
            self.species = float("%4.1f%02d%02d" % (self.element/10., light, heavy))
#self.species = "%4.1f%2d%2d" % (self.element/10., (10.0+float(l[10])/10)+0.0001*(10.0+float(l[10])%10)
self.wl = float(l1[1])
self.loggf = float(l1[2])
self.expot_lo = float(l1[3])
self.J_lo = float(l1[4])
self.expot_hi = float(l1[5])
self.J_hi = float(l1[6])
self.g_lo = float(l1[7])
self.g_hi = float(l1[8])
self.g_eff = float(l1[9])
self.radiative = float(l1[10])
self.stark = float(l1[11])
self.VdW = float(l1[12])
self.loggfHistory = []
self.VdWHistory = []
self.DissE = pt.DissE(self.ID[0])
else:
self.ionization = int(self.ID[1])-1
self.species = self.element + self.ionization/10.0
self.wl = float(l1[1]) # WL in Air
self.loggf = float(l1[2])
self.expot_lo = float(l1[3]) # in eV
self.J_lo = float(l1[4])
self.expot_hi = float(l1[5])
self.J_hi = float(l1[6])
self.g_lo = float(l1[7])
self.g_hi = float(l1[8])
self.g_eff = float(l1[9])
self.radiative = float(l1[10])
if self.element == 1.0:
self.stark = -4.0
self.VdW = -5.4
else:
self.stark = float(l1[11])
self.VdW = float(l1[12])
self.loggfHistory = []
self.VdWHistory = []
self.DissE = None
self.upperTerm = lines[1].strip().strip('\'').split()
self.lowerTerm = lines[2].strip().strip('\'').split()
self.references = lines[3].strip().strip('\'')
if (self.g_lo == 99.0):
# Lower State
try:
if self.lowerTerm[0] == 'LS':
self.g_lo = self.parse_LS_coupling(self.lowerTerm[-1], self.J_lo)
elif self.lowerTerm[0] == 'JJ':
#print("Awww Shucks, the JJ coupling parser isn't ready yet!")
#self.g_lo = self.parse_JJ_coupling(self.lowerTerm[-1])
self.g_lo = 0.0
elif self.lowerTerm[0] == 'JK':
#print("Awww Shucks, the JK coupling parser isn't ready yet!")
self.g_lo = 0.0
elif self.lowerTerm[0] == 'LK':
#print("Awww Shucks, the LK coupling parser isn't ready yet!")
self.g_lo = 0.0
else:
self.g_lo = 0.0
except:
print("Lower state of line at %.3f failed!" % self.wl)
self.g_lo = 0.0
# Upper State
try:
if self.upperTerm[0] == 'LS':
self.g_hi = self.parse_LS_coupling(self.upperTerm[-1], self.J_hi)
elif self.upperTerm[0] == 'JJ':
#print("Awww Shucks, the JJ coupling parser isn't ready yet!")
self.g_hi = 0.0
elif self.upperTerm[0] == 'JK':
#print("Awww Shucks, the JK coupling parser isn't ready yet!")
self.g_hi = 0.0
elif self.upperTerm[0] == 'LK':
#print("Awww Shucks, the LK coupling parser isn't ready yet!")
self.g_hi = 0.0
else:
self.g_hi = 0.0
except:
print("Upper state of line at %.3f failed!" % self.wl)
self.g_hi = 0.0
self.lower = Energy_Level(self.J_lo, self.g_lo, self.expot_lo)
self.upper = Energy_Level(self.J_hi, self.g_hi,
self.expot_lo+12400.0/self.wl)
self.zeeman = {}
self.zeeman["NOFIELD"] = [self.wl,self.loggf]
def parse_LS_coupling(self, term, J):
if J <= 0:
return 0.0
angmom = {"S":0, "P":1, "D":2, "F":3, "G":4, "H":5,
"I":6, "K":7, "L":8, "M":9}
try:
if term[0].isdigit():
S = (float(term[0])-1.0)/2.0
else:
S = (float(term[1])-1.0)/2.0
if term[-1] == '*':
L = angmom[term[-2]]
else:
L = angmom[term[-1]]
lande_g = (1.5+(S*(S+1.0)-L*(L+1))/
(2.0*J*(J+1.0)))
except:
print("Parsing of LS term at %.3f FAILED!!!" % self.wl)
lande_g = 0.0
return lande_g
def parse_JJ_coupling(self, term):
term = term.strip('*').replace('(','').replace(')','').split(',')
J1 = term[0].split('/')
if len(J1) == 2:
J1 = float(J1[0])/float(J1[1])
else:
J1 = float(J1[0])
J2 = term[1].split('/')
if len(J2) == 2:
J2 = float(J2[0])/float(J2[1])
else:
J2 = float(J2[0])
lande_g = 0.0
return lande_g
class Plez_CN_Line( Spectral_Line ):
def __init__(self, line, **kwargs):
l = line.split()
self.wl = float(l[0])
self.species = float(l[1])
self.DissE = 7.72
self.expot_lo = float(l[2])
self.loggf = float(l[3])
self.VdW = None
self.radiative = None
self.stark = None
self.zeeman = {}
self.transition = None
self.J_lo = None
self.J_hi = None
self.g_lo = None
self.g_hi = None
self.g_eff = None
self.loggfHistory = []
self.VdWHistory = []
self.zeeman["NOFIELD"] = [self.wl, self.loggf]
self.Bfield = 0.0
if "verbose" in kwargs:
self.verbose = kwargs["verbose"]
else:
self.verbose = False
class Goorvitch_CO_Line( Spectral_Line ):
def __init__(self, line, **kwargs):
l = line.split('|')
try:
self.wl = 1.0e8/float(l[0])
self.species = 0608.0+0.001*(10.0+float(l[10])/10)+0.0001*(10.0+float(l[10])%10)
self.DissE = 11.10
self.expot_lo = 1.23981e-4*float(l[3])
self.loggf = numpy.log10(float(l[4]))
self.VdW = None
self.radiative = None
self.stark = None
self.zeeman = {}
self.transition = None
self.J_lo = None
self.J_hi = None
self.g_lo = None
self.g_hi = None
self.g_eff = None
self.loggfHistory = []
self.VdWHistory = []
self.zeeman["NOFIELD"] = [self.wl, self.loggf]
self.Bfield = 0.0
if "verbose" in kwargs:
self.verbose = kwargs["verbose"]
else:
self.verbose = False
except:
self.wl = -99.9
class HITRAN_Line( Spectral_Line ):
def __init__(self, line, hitran_dictionary, **kwargs):
hitran_code = int(line[0:2])
isotope_code = int(line[2])
self.species = hitran_dictionary.isotopes[hitran_code][isotope_code]
self.DissE = hitran_dictionary.DissE[hitran_code]
self.wl = 10000.0/float(line[3:15])*10000.0/1.000273
self.expot_lo = 1.23986e-4*float(line[45:56])
Einstein_A = float(line[26:35])
g_up = float(line[145:154])
g_low = float(line[154:])
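        # Einstein A -> oscillator strength: g_l*f = 1.499e-16 * lambda[Angstrom]**2 * g_u * A_ul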
self.loggf = numpy.log10(1.4991e-16*self.wl**2*g_up*Einstein_A)
self.VdW = None
self.radiative = None
self.stark = None
self.zeeman = {}
self.transition = None
self.J_lo = None
self.J_hi = None
self.g_lo = None
self.g_hi = None
self.g_eff = None
self.loggfHistory = []
self.VdWHistory = []
self.zeeman["NOFIELD"] = [self.wl, self.loggf]
self.Bfield = 0.0
if "verbose" in kwargs:
self.verbose = kwargs["verbose"]
else:
self.verbose = False
class zeemanTransition( object):
def __init__(self, wavelength, weight, m_up, m_low):
self.wavelength = wavelength
self.weight = weight
self.m_up = m_up
self.m_low = m_low
def __eq__(self, other):
return ( (self.wavelength == other.wavelength) &
(self.m_up == other.m_up) & (self.m_low == other.m_low) )
class Energy_Level( object ):
def __init__(self, J, g, E):
self.E = E
self.J = J
if g != 99:
self.g = g
else:
self.g = 1.0
self.mj = numpy.arange(self.J, (-1.0*self.J)-0.5, step = -1)
def generate_CorrectedLines(original_files, new_files, outfile, compfile):
out = open(outfile, 'w')
comp = open(compfile, 'w')
outlines = []
complines = []
for orig, new in zip(original_files, new_files):
with open(orig) as o, open(new) as n:
old_lines = o.readlines()
new_lines = n.readlines()
for ol, nl in zip(old_lines, new_lines):
if ol != nl:
outlines.append(nl)
complines.append(ol)
order = numpy.argsort(outlines)
out.writelines(numpy.array(outlines)[order])
comp.writelines(numpy.array(complines)[order])
out.close()
comp.close()
def parse_VALD(VALD_list, strong_file, wl_start, wl_stop, Bfield,
gf_corrections):
pt = periodicTable()
corrected = []
for line in open(gf_corrections, 'r'):
corrected.append(MOOG_Line(line))
strong = []
for line in open(strong_file, 'r'):
l = line.split()
strong.append([float(l[0]), float(l[1])])
vald_in = open(VALD_list, 'r')
l1 = ''
l2 = ''
stronglines = []
weaklines = []
for line in vald_in:
if line[0] != '#':
if line[0] == '\'':
l1 = line
else:
l2 = line
current_line = VALD_Line(l1, l2, pt)
wl = current_line.wl
if ( (wl > wl_start) & (wl < wl_stop) ):
for cl in corrected:
if (cl.wl == current_line.wl) & (cl.expot_lo ==
current_line.expot_lo):
print("Making a correction!")
current_line.loggf = cl.loggf
current_line.zeeman["NOFIELD"][1] = cl.loggf
current_line.VdW = cl.VdW
current_line.stark = cl.stark
current_line.radiative = cl.radiative
current_line.zeeman_splitting(Bfield)
species = current_line.species
if ( [wl, species] in strong):
stronglines.append(current_line)
else:
weaklines.append(current_line)
return stronglines, weaklines
def parse_HITRAN(HITRAN_file, wl_start, wl_stop, B_field,
gf_corrections, **kwargs):
corrected = []
for line in open(gf_corrections, 'r'):
corrected.append(MOOG_Line(line))
ht = HITRAN_Dictionary()
hitran_in = open(HITRAN_file, 'r')
lines = []
for line in hitran_in:
current_line = HITRAN_Line(line, ht)
if ( (current_line.wl > wl_start) & (current_line.wl < wl_stop) ):
for cl in corrected:
if (cl.wl == current_line.wl) & (cl.expot_lo ==
current_line.expot_lo):
current_line.loggf = cl.loggf
current_line.VdW = cl.VdW
current_line.stark = cl.stark
current_line.radiative = cl.radiative
if "weedout" in kwargs:
if current_line.expot_lo < kwargs["weedout"]:
lines.append(current_line)
else:
print('Tossed CO line!')
else:
lines.append(current_line)
return lines
def parse_Plez_CN(CN_file, wl_start, wl_stop, B_field, gf_corrections,
**kwargs):
corrected = []
for line in open(gf_corrections, 'r'):
corrected.append(MOOG_Line(line))
cn_in = open(CN_file, 'r')
lines = []
for line in cn_in:
current_line = Plez_CN_Line(line)
if ( (current_line.wl > wl_start) & (current_line.wl < wl_stop) ):
for cl in corrected:
if (cl.wl == current_line.wl) & (cl.expot_lo ==
current_line.expot_lo):
current_line.loggf = cl.loggf
current_line.VdW = cl.VdW
current_line.stark = cl.stark
current_line.radiative = cl.radiative
lines.append(current_line)
return lines
def parse_Goorvitch_CO(CO_file, wl_start, wl_stop, B_field, gf_corrections,
**kwargs):
corrected = []
for line in open(gf_corrections, 'r'):
corrected.append(MOOG_Line(line))
co_in = open(CO_file, 'r')
lines = []
for line in co_in:
current_line = Goorvitch_CO_Line(line)
if ( (current_line.wl > wl_start) & (current_line.wl < wl_stop) ):
for cl in corrected:
if (cl.wl == current_line.wl) & (cl.expot_lo ==
current_line.expot_lo):
current_line.loggf = cl.loggf
current_line.VdW = cl.VdW
current_line.stark = cl.stark
current_line.radiative = cl.radiative
lines.append(current_line)
return lines
def write_par_file(wl_start, wl_stop, stage_dir, b_dir, prefix, temps=None,
gravs=None, mode='gridstokes', strongLines=False, **kwargs):
if mode=='gridstokes':
fn = 'batch.par'
suffix = '.stokes'
elif mode == 'gridsyn':
fn = 'batch.gridsyn'
suffix = '.scalar'
elif mode == 'stokes':
fn = 'batch.stokes'
suffix = '.stokes'
elif mode == 'synth':
fn = 'batch.synth'
suffix = '.scalar'
outfile_name = os.path.join(stage_dir,'Parfiles', b_dir, fn)
if "OUT_DIR" in kwargs.keys():
output_prefix = kwargs["OUT_DIR"]
else:
output_prefix = '../../Output/'+b_dir+'/'
line_prefix = '../../Linelists/'+b_dir+'/'
labels = {'terminal':'x11',
'strong':1,
'atmosphere':1,
'molecules':2,
'lines':1,
'damping':1,
'freeform':2,
'flux/int':0,
'diskflag':1}
#'plot':2,
#'obspectrum':5}
file_labels = {'summary_out':'../../Output/'+b_dir+'/summary.out',
'standard_out':output_prefix+'out1',
'smoothed_out':output_prefix+'smoothed.out',
'atmos_dir':'/home/deen/Data/Atmospheres/MARCS/',
'out_dir':output_prefix,
'lines_in':line_prefix+prefix+'_weak_linelist'+suffix,
'stronglines_in':line_prefix+prefix+'_strong_linelist'+suffix}
#'model_in':'model.md',
#'observed_in':'observed.dat'}
for l in labels:
if l in kwargs:
labels[l] = kwargs[l]
for fl in file_labels:
if fl in kwargs:
file_labels[fl] = kwargs[fl]
pf = open(outfile_name, 'w')
pf.write(mode+'\n')
for fl in file_labels:
pf.write(fl+' \''+file_labels[fl]+'\'\n')
for l in labels:
pf.write(l+' '+str(labels[l])+'\n')
pf.write('synlimits\n')
pf.write(' '+str(wl_start)+' '
+str(wl_stop)+' 0.01 3.50\n')
pf.write('plotpars 1\n')
pf.write(' '+str(wl_start)+' '
+str(wl_stop)+' 0.02 1.00\n')
pf.write(' 0.00 0.000 0.000 1.00\n')
pf.write(' g 0.150 0.00 0.00 0.00 0.00\n')
if ( (mode=='gridstokes') | (mode=='gridsyn')):
run_number = 1
if (not temps):
            temps = list(range(2500, 4100, 100)) + list(range(4250, 6250, 250))
if (not gravs):
gravs = range(300, 550, 50)
for T in temps:
for G in gravs:
pf.write('RUN '+str(run_number)+'\n')
if mode == 'gridstokes':
pf.write('stokes_out \''+prefix+
'_MARCS_T'+str(T)+'G'+str(G)+'\'\n')
else:
pf.write('smoothed_out \''+prefix+
'_MARCS_T'+str(T)+'G'+str(G)+'\'\n')
pf.write('hardpost_out \'../../Output/'+b_dir+'/dummy.ps\'\n')
pf.write('model_in \'MARCS_T'+
str(T)+'_G'+str(G/100.0)+'_M0.0_t2.0.md\'\n')
pf.write('abundances 1 1\n')
pf.write(' 12 0.0\n')
run_number += 1
pf.close()
class Angle( object ):
def __init__(self, line):
l = line.split()
self.n = int(l[0])
self.az = [float(l[1]), float(l[2]), float(l[3])]
self.longitude = [float(l[4]), float(l[5])]
self.phi = float(l[6])
self.chi = float(l[7])
self.mu = float(l[8])
class Diskoball( object ):
def __init__(self, name, **kwargs):
self.name = name
if "DIR" in kwargs.keys():
self.directory = kwargs["DIR"]
else:
self.directory = '../'
if "VSINI" in kwargs.keys():
self.vsini = kwargs["VSINI"]
else:
self.vsini = 0.0
self.dfI = self.directory+self.name+'.spectrum_I'
self.dfQ = self.directory+self.name+'.spectrum_Q'
self.dfU = self.directory+self.name+'.spectrum_U'
self.dfV = self.directory+self.name+'.spectrum_V'
self.dfCont = self.directory+self.name+'.continuum'
self.dfAngles = self.directory+self.name+'.angles'
Angles = open(self.dfAngles, 'r')
StokesI = open(self.dfI, 'r')
StokesQ = open(self.dfQ, 'r')
StokesU = open(self.dfU, 'r')
StokesV = open(self.dfV, 'r')
Continuum = open(self.dfCont, 'r')
linecounter = 0
self.ang_info = []
for line in Angles:
if linecounter == 0:
l = line.split()
self.ncells = int(l[0])
self.nrings = int(l[1])
self.inclination = float(l[2])
self.PA = float(l[3])
self.cell_area = 4.0*3.1415926/self.ncells
linecounter +=1
else:
self.ang_info.append(Angle(line))
wl = []
I = []
Q = []
U = []
V = []
C = []
for line in StokesI:
l = line.split()
wl.append(float(l[0]))
a = []
for fluxes in l[1:]:
try:
a.append(float(fluxes))
except:
print("Warning! I crazy format")
a.append(float(0.0))
I.append(a)
for line in StokesQ:
l = line.split()
a = []
for fluxes in l[1:]:
try:
a.append(float(fluxes))
except:
print("Warning! Q crazy format")
a.append(float(0.0))
Q.append(a)
for line in StokesU:
l = line.split()
a = []
for fluxes in l[1:]:
try:
a.append(float(fluxes))
except:
print("Warning! U crazy format")
a.append(float(0.0))
U.append(a)
for line in StokesV:
l = line.split()
a = []
for fluxes in l[1:]:
try:
a.append(float(fluxes))
except:
print("Warning! V crazy format")
a.append(float(0.0))
V.append(a)
for line in Continuum:
l = line.split()
a = []
for fluxes in l[1:]:
try:
a.append(float(fluxes))
except:
print("Warning! C crazy format")
a.append(float(0.0))
C.append(a)
self.wl = numpy.array(wl)
I = numpy.array(I)
Q = numpy.array(Q)
U = numpy.array(U)
V = numpy.array(V)
C = numpy.array(C)
self.I = I.transpose()
self.Q = Q.transpose()
self.U = U.transpose()
self.V = V.transpose()
self.C = C.transpose()
wave = numpy.mean(self.wl)
if ((1.0/(wave/10000.0)) < 2.4):
self.alpha = -0.023 + 0.292/(wave/10000.0)
else:
self.alpha = -0.507 + 0.441/(wave/10000.0)
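        # wavelength-dependent exponent for the power-law limb darkening
        # I(mu) ~ mu**alpha used in disko(); the piecewise coefficients are an
        # empirical fit taken as given here (wave is in Angstroms, and the test
        # above converts it to inverse microns).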
def interpolate(self, stepsize):
self.wave = numpy.arange(self.wl[0], self.wl[-1], step=stepsize)
fI = scipy.interpolate.UnivariateSpline(self.wl, self.integrated_I, s=0)
fQ = scipy.interpolate.UnivariateSpline(self.wl, self.integrated_Q, s=0)
fU = scipy.interpolate.UnivariateSpline(self.wl, self.integrated_U, s=0)
fV = scipy.interpolate.UnivariateSpline(self.wl, self.integrated_V, s=0)
fC = scipy.interpolate.UnivariateSpline(self.wl, self.integrated_C, s=0)
self.flux_I = fI(self.wave)
self.flux_Q = fQ(self.wave)
self.flux_U = fU(self.wave)
self.flux_V = fV(self.wave)
self.flux_C = fC(self.wave)
def disko(self):
r2d = 180.0/numpy.pi
final_I = numpy.zeros(len(self.wl))
final_Q = numpy.zeros(len(self.wl))
final_U = numpy.zeros(len(self.wl))
final_V = numpy.zeros(len(self.wl))
final_C = numpy.zeros(len(self.wl))
total_weight = 0.0
T_I = numpy.matrix([[1.0, 0.0, 0.0],
[0.0, numpy.cos(self.inclination), numpy.sin(self.inclination)],
[0.0, -numpy.sin(self.inclination), numpy.cos(self.inclination)]])
emergent_vector = numpy.matrix([1.0, 0.0, 0.0])
for tile in zip(self.I, self.Q, self.U, self.V, self.C, self.ang_info):
azimuth = tile[5].az
n_az_steps = int(azimuth[2]*r2d-azimuth[1]*r2d)
azs = azimuth[1]+(numpy.arange(n_az_steps)+0.5)*(azimuth[2]-
azimuth[1])/n_az_steps
az1 = azimuth[1]+(numpy.arange(n_az_steps))*(azimuth[2]-
azimuth[1])/n_az_steps
az2 = azimuth[1]+(numpy.arange(n_az_steps)+1.0)*(azimuth[2]-
azimuth[1])/n_az_steps
longitude = tile[5].longitude
dphi = longitude[1]
n_phi_steps = int(dphi*r2d)
phis = longitude[0]-dphi/2.0+(numpy.arange(n_phi_steps)+
0.5)*dphi/n_phi_steps
for az in zip(azs, az1, az2):
T_rho = numpy.matrix([[0.0, 0.0, 1.0],
[-numpy.cos(az[0]), numpy.sin(az[0]), 0.0],
[numpy.sin(az[0]), numpy.cos(az[0]), 0.0]])
daz = numpy.sin(az[2])-numpy.sin(az[1])
area = daz*dphi/n_phi_steps
for phi in phis:
T_eta = numpy.matrix([
[numpy.cos(phi), -numpy.sin(phi), 0.0],
[numpy.sin(phi), numpy.cos(phi), 0.0],
[0.0, 0.0, 1.0]])
surface_vector = T_I*T_eta*T_rho*emergent_vector.T
mu = surface_vector.A[2][0]
if (mu > 0.00001):
projected_area = area*mu#/(4.0*pi)
limb_darkening = (1.0-(1.0-mu**self.alpha))
weight = projected_area*limb_darkening
total_weight += weight
final_I = final_I + weight*tile[0]/tile[4]
final_Q = final_Q + weight*tile[1]/tile[4]
final_U = final_U + weight*tile[2]/tile[4]
final_V = final_V + weight*tile[3]/tile[4]
final_C = final_C + weight*tile[4]
self.integrated_I = final_I/total_weight
self.integrated_Q = final_Q/total_weight
self.integrated_U = final_U/total_weight
self.integrated_V = final_V/total_weight
self.integrated_C = final_C/total_weight
def save(self, outfile):
SpectralTools.write_2col_spectrum(outfile+'.I', self.wave, self.flux_I)
SpectralTools.write_2col_spectrum(outfile+'.Q', self.wave, self.flux_Q)
SpectralTools.write_2col_spectrum(outfile+'.U', self.wave, self.flux_U)
SpectralTools.write_2col_spectrum(outfile+'.V', self.wave, self.flux_V)
SpectralTools.write_2col_spectrum(outfile+'.C', self.wave, self.flux_C)
class MoogStokes_IV_Spectrum( object ):
#"""
def __init__(self, name='', memory=False, **kwargs):
self.memory = memory
if self.memory:
self.parent = kwargs["PARENT"]
self.deltav = self.parent.deltav
self.vsini = self.parent.vsini
else:
self.name = name
if "DIR" in kwargs.keys():
self.directory = kwargs["DIR"]
else:
self.directory = '../'
if "DELTAV" in kwargs.keys():
self.deltav = kwargs["DELTAV"]
else:
self.deltav = 0.1 # wl spacing in km/s
if "VSINI" in kwargs.keys():
self.vsini = kwargs["VSINI"]
else:
self.vsini = 0.0
self.angle_file = self.directory+self.name+'.angles'
self.continuum_file = self.directory+self.name+'.continuum'
self.I_file = self.directory+self.name+'.spectrum_I'
self.V_file = self.directory+self.name+'.spectrum_V'
self.nangles = 0
self.phi = []
self.mu = []
self.wl = []
self.I = []
self.V = []
self.continuum = []
self.loadAngles()
self.loadSpectra()
self.interpolateSpectra()
self.diskInt()
#"""
def loadAngles(self):
if self.memory:
self.phi = self.parent.phi_angle[:self.parent.ncells]
self.mu = self.parent.mus[:self.parent.ncells]
else:
df = open(self.angle_file, 'r')
for line in df:
l = line.split()
if len(l) == 1:
self.nangles = int(l[0])
else:
self.phi.append(float(l[1]))
self.mu.append(float(l[2]))
self.phi = numpy.array(self.phi)
self.mu = numpy.array(self.mu)
def loadSpectra(self):
if self.memory:
self.I = numpy.array(self.parent.flux_I)/numpy.array(self.parent.continuum)
self.V = numpy.array(self.parent.flux_V)/numpy.array(self.parent.continuum)
self.continuum = numpy.array(self.parent.continuum)
self.wl = numpy.array(self.parent.wave)
else:
df_I = open(self.I_file, 'r')
df_V = open(self.V_file, 'r')
df_C = open(self.continuum_file, 'r')
continuum = []
I = []
V = []
wl = []
for line in df_C:
l = line.split()
wl.append(float(l[0]))
a = []
for fluxes in l[1:]:
try:
a.append(float(fluxes))
except:
print("Warning! Crazy Continuum format!", fluxes)
a.append(float(0.0))
continuum.append(a)
for line in df_I:
l = line.split()
a = []
for fluxes in l[1:]:
try:
a.append(float(fluxes))
except:
print("Woah there pardner! Crazy format - Stokes I!", fluxes)
a.append(float(0.0))
I.append(a)
for line in df_V:
l = line.split()
a = []
for fluxes in l[1:]:
try:
a.append(float(fluxes))
except:
print("Woah there pardner! Crazy format - Stokes V!", fluxes)
a.append(float(0.0))
V.append(a)
self.wl = numpy.array(wl)
I = numpy.array(I)
V = numpy.array(V)
continuum = numpy.array(continuum)
self.continuum = continuum.transpose()
self.I = I.transpose()/self.continuum
self.V = V.transpose()/self.continuum
wave = numpy.mean(self.wl)
if ((1.0/(wave/10000.0)) < 2.4):
self.alpha = -0.023 + 0.292/(wave/10000.0)
else:
self.alpha = -0.507 + 0.441/(wave/10000.0)
def interpolateSpectra(self):
deltav = self.deltav
c = 3e5 #km/s
wl_start = numpy.min(self.wl)
wl_max = numpy.max(self.wl)
new_wl = []
new_wl.append(wl_start)
while new_wl[-1] < wl_max:
d_lambda = new_wl[-1]*deltav/c
new_wl.append(new_wl[-1]+d_lambda)
self.new_wl = numpy.array(new_wl[0:-1])
new_I = []
new_V = []
for I,V in zip(self.I, self.V):
fI = scipy.interpolate.UnivariateSpline(self.wl, I, s=0)
fV = scipy.interpolate.UnivariateSpline(self.wl, V, s=0)
new_I.append(fI(self.new_wl))
new_V.append(fV(self.new_wl))
self.new_I = numpy.array(new_I)
self.new_V = numpy.array(new_V)
def diskInt(self):
deltav = self.deltav
vsini = self.vsini
c = 3e5
limb_darkening = []
for i in range(len(self.mu)):
limb_darkening.append(1.0-(1.0-self.mu[i]**(self.alpha)))
self.limb_darkening = numpy.array(limb_darkening)
continuum = []
for i in range(len(self.mu)):
self.new_I[i] *= self.limb_darkening[i]
self.new_V[i] *= self.limb_darkening[i]
continuum.append(numpy.ones(len(self.new_I[i]))
*self.limb_darkening[i])
continuum = numpy.array(continuum)
self.final_spectrum_I = self.rtint(self.mu, self.new_I,
continuum, deltav, vsini, 0.0)
self.final_spectrum_V = self.rtint(self.mu, self.new_V,
continuum, deltav, vsini, 0.0)
def save(self, outfile):
SpectralTools.write_2col_spectrum(outfile, self.new_wl, self.final_spectrum_I)
def rtint(self, mu, inten, cont, deltav, vsini_in, vrt_in, **kwargs):
"""
This is a python translation of Jeff Valenti's disk integration routine
PURPOSE:
Produces a flux profile by integrating intensity profiles (sampled
at various mu angles) over the visible stellar surface.
Calling Sequence:
flux = rtint(mu, inten, deltav, vsini, vrt)
INPUTS:
MU: list of length nmu cosine of the angle between the outward normal
and the line of sight for each intensity spectrum INTEN
        INTEN: list (of length nmu) of numpy arrays (each of length npts)
intensity spectra at specified values of MU
DELTAV: (scalar) velocity spacing between adjacent spectrum points in
INTEN (same units as VSINI and VRT)
        VSINI: (scalar) maximum radial velocity, due to solid-body rotation
        VRT: (scalar) radial-tangential macroturbulence parameter, i.e. sqrt(2)
times the standard deviation of a Gaussian distribution of
turbulent velocities. The same distribution function describes
            the radial motions of one component and the tangential motions of
a second component. Each component covers half the stellar surface.
See "Observation and Analysis of Stellar Photospheres" by Gray.
INPUT KEYWORDS:
        OSAMP: (scalar) internal oversampling factor for the convolutions. By
default, convolutions are done using the input points (OSAMP=1),
but when OSAMP is set to higher integer values, the input spectra
            are first oversampled via cubic spline interpolation.
OUTPUTS:
function value: numpy array of length npts producing the disk-integrated
flux profile.
RESTRICTIONS:
Intensity profiles are weighted by the fraction of the projected stellar
surface they represent, apportioning the area between adjacent MU
points equally. Additional weights (such as those used in a Gauss-
Legendre quadrature) cannot meaningfully be used in this scheme.
About twice as many points are required with this scheme to achieve
the same precision of Gauss-Legendre quadrature.
        DELTAV, VSINI, and VRT must all be in the same units (e.g. km/s).
If specified, OSAMP should be a positive integer
AUTHOR'S REQUEST:
If you use this algorithm in work that you publish, please cite...
MODIFICATION HISTORY:
Feb 88 GM Created ANA version
13 Oct 92 JAV Adapted from G. Marcy's ANA routine of same name
03 Nov 93 JAV Switched to annular convolution technique
12 Nov 93 JAV Fixed bug. Intensity components not added when vsini=0
14 Jun 94 JAV Reformatted for "public" release. Heavily commented.
Pass deltav instead of 2.998d5/deltav. Added osamp
keyword. Added rebinning logic and end of routine.
Changed default osamp from 3 to 1.
20 Feb 95 JAV Added mu as an argument to handle arbitrary mu sampling
and remove ambiguity in intensity profile ordering.
Interpret VTURB as sqrt(2)*sigma instead of just sigma
Replaced call_external with call to spl_{init|interp}.
03 Apr 95 JAV Multiply flux by !pi to give observed flux.
24 Oct 95 JAV Force "nmk" padding to be at least 3 pixels
18 Dec 95 JAV Renamed from dkint() to rtint(). No longer make local
copy of intensities. Use radial-tangential instead of
isotropic Gaussian macroturbulence.
26 Jan 99 JAV For NMU=1 and VSINI=0, assume resolved solar surface;
apply R-T macro, but supress vsini broadening.
01 Apr 99 GMH Use annuli weights, rather than assuming equal area.
27 Feb 13 CPD Translated to Python
"""
#make local copies of various input vars, which will be altered below
vsini = float(vsini_in)
vrt = float(vrt_in)
if "OSAMP" in kwargs:
os = max(round(kwargs["OSAMP"]), 1)
else:
os = 1
#Convert input MU to proj. radii, R of annuli for star of unit radius
#(which is just sine rather than cosine of the angle between the outward
#normal and the LOS)
rmu = numpy.sqrt(1.0-mu**2)
#Sort the proj. radii and corresponding intensity spectra into ascending
#order (i.e. from disk center to limb), which is equivalent to sorting
        #MU in descending order
order = numpy.argsort(rmu)
rmu = rmu[order]
nmu = len(mu)
if (nmu == 1):
vsini = 0.0
#Calculate the proj. radii for boundaries of disk integration annuli.
#The n+1 boundaries are selected so that r(i+1) exactly bisects the area
#between rmu(i) and rmu(i+1). The innermost boundary, r(0) is set to 0
#(Disk center) and the outermost boundary r(nmu) is set to to 1 (limb).
if ((nmu > 1) | (vsini != 0)):
r = numpy.sqrt(0.5*(rmu[0:-1]**2.0+rmu[1:]**2.0)).tolist()
r.insert(0, 0.0)
r.append(1.0)
r = numpy.array(r)
#Calculate integration weights for each disk integration annulus. The
#weight is given by the relative area of each annulus, normalized such
#that the sum of all weights is unity. Weights for limb darkening are
#included explicitly in intensity profiles, so they aren't needed here.
wt = r[1:]**2.0 - r[0:-1]**2.0
else:
wt = numpy.array([1.0])
#Generate index vectors for input and oversampled points. Note that the
        #oversampled indices are carefully chosen such that every "os" finely
#sampled points fit exactly into one input bin. This makes it simple to
#"integrate" the finely sampled points at the end of the routine.
npts = len(inten[0])
xpix = numpy.arange(npts)
nfine = os*npts
        xfine = (0.5/os) * (2.0*numpy.arange(nfine) - os + 1.0)
#Loop through annuli, constructing and convolving with rotation kernels.
dummy = 0
yfine = numpy.zeros(nfine)
cfine = numpy.zeros(nfine)
flux = numpy.zeros(nfine)
continuum = numpy.zeros(nfine)
for m, y, c, w, i in zip(mu, inten, cont, wt, range(nmu)):
#use cubic spline routine to make an oversampled version of the
#intensity profile for the current annulus.
if os== 1:
yfine = y.copy()
cfine = c.copy()
else:
yspl = scipy.interpolate.splrep(xpix, y)
                cspl = scipy.interpolate.splrep(xpix, c)
                yfine = scipy.interpolate.splev(xfine, yspl)
                cfine = scipy.interpolate.splev(xfine, cspl)
# Construct the convolution kernel which describes the distribution of
# rotational velocities present in the current annulus. The distribution
            # has been derived analytically for annuli of arbitrary thickness in a
# rigidly rotating star. The kernel is constructed in two places: one
# piece for radial velocities less than the maximum velocity along the
# inner edge of annulus, and one piece for velocities greater than this
# limit.
if vsini > 0:
r1 = r[i]
r2 = r[i+1]
dv = deltav/os
maxv = vsini * r2
                nrk = 2*int(maxv/dv) + 3
v = dv * (numpy.arange(nrk) - ((nrk-1)/2.))
rkern = numpy.zeros(nrk)
                j1 = numpy.where(abs(v) < vsini*r1)
if len(j1[0]) > 0:
rkern[j1] = (numpy.sqrt((vsini*r2)**2 - v[j1]**2)-
numpy.sqrt((vsini*r1)**2 - v[j1]**2))
                j2 = numpy.where((abs(v) >= vsini*r1) & (abs(v) <= vsini*r2))
if len(j2[0]) > 0:
rkern[j2] = numpy.sqrt((vsini*r2)**2 - v[j2]**2)
rkern = rkern / rkern.sum() # normalize kernel
# Convolve the intensity profile with the rotational velocity kernel for
# this annulus. Pad end of each profile with as many points as are in
# the convolution kernel, reducing Fourier ringing. The convolution
# may also be done with a routine called "externally" which efficiently
# shifts and adds.
if nrk > 3:
                    yfine = numpy.convolve(yfine, rkern, mode='same')
                    cfine = numpy.convolve(cfine, rkern, mode='same')
            # Calc projected sigma for radial and tangential velocity distributions.
sigma = os*vrt/numpy.sqrt(2.0) /deltav
sigr = sigma * m
sigt = sigma * numpy.sqrt(1.0 - m**2.)
# Figure out how many points to use in macroturbulence kernel
            nmk = int(max(min(round(sigma*10), (nfine-3)//2), 3))
# Construct radial macroturbulence kernel w/ sigma of mu*VRT/sqrt(2)
if sigr > 0:
                xarg = (numpy.arange(2*nmk+1)-nmk) / sigr # exponential arg
                mrkern = numpy.exp(numpy.maximum(-0.5*(xarg**2), -20.0))
mrkern = mrkern/mrkern.sum()
else:
mrkern = numpy.zeros(2*nmk+1)
mrkern[nmk] = 1.0 #delta function
# Construct tangential kernel w/ sigma of sqrt(1-mu**2)*VRT/sqrt(2.)
if sigt > 0:
                xarg = (numpy.arange(2*nmk+1)-nmk) / sigt
                mtkern = numpy.exp(numpy.maximum(-0.5*(xarg**2), -20.0))
mtkern = mtkern/mtkern.sum()
else:
mtkern = numpy.zeros(2*nmk+1)
mtkern[nmk] = 1.0
# Sum the radial and tangential components, weighted by surface area
area_r = 0.5
area_t = 0.5
mkern = area_r*mrkern + area_t*mtkern
# Convolve the total flux profiles, again padding the spectrum on both ends
            # to protect against Fourier ringing.
            yfine = numpy.convolve(yfine, mkern, mode='same')
            cfine = numpy.convolve(cfine, mkern, mode='same')
# Add contribution from current annulus to the running total
flux += w*yfine
continuum += w*cfine
return flux/continuum
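# --- Usage sketch (not part of the original module) ---
# A minimal, self-contained exercise of the disk-integration routine on a
# synthetic line profile.  The Gaussian absorption line and the mu angles
# below are assumptions made purely for demonstration; rtint() only uses its
# arguments, so the instance created with __new__ is just a convenient way to
# call it without reading any MOOG output files (numpy is assumed to be
# imported at the top of this module, as elsewhere in the code).
if __name__ == "__main__":
    _demo = MoogStokes_IV_Spectrum.__new__(MoogStokes_IV_Spectrum)
    _mu = numpy.array([0.9, 0.6, 0.3])                   # disk center -> limb
    _vel = numpy.linspace(-50.0, 50.0, 501)               # km/s grid, deltav = 0.2
    _line = 1.0 - 0.5*numpy.exp(-0.5*(_vel/5.0)**2)        # synthetic absorption line
    _inten = numpy.array([_line*m for m in _mu])           # crude limb darkening
    _cont = numpy.array([numpy.ones(len(_vel))*m for m in _mu])
    _flux = _demo.rtint(_mu, _inten, _cont, 0.2, 10.0, 2.0)
    print("disk-integrated profile: %d points, minimum %.3f" % (len(_flux), _flux.min()))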
|
JNMcLane/MoogPy
|
MoogTools/MoogTools.py
|
Python
|
mit
| 95,515
|
[
"Gaussian"
] |
0a4984df4d68d5d0d22a71e1713f69733554d5ffe9f2ff745375b7ed24de8482
|
# Copyright (C) 2011, 2012, 2013, 2015 David Maxwell
#
# This file is part of PISM.
#
# PISM is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# PISM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with PISM; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Utility functions/objects that don't have a good home elsewhere."""
import PISM
import time
import sys
def writeProvenance(outfile, message=None):
"""Saves the time and command line arguments (or the provided `message`) to
the ``history`` attribute of the :file:`.nc` file `outfile`"""
rank = PISM.Context().rank
if rank == 0:
nc = PISM.netCDF.Dataset(outfile, 'a') # append
if message is None:
message = time.asctime() + ': ' + ' '.join(sys.argv)
if 'history' in nc.ncattrs():
nc.history = message + '\n' + nc.history
else:
nc.history = message
nc.source = "PISM " + PISM.PISM_Revision
nc.close()
PISM.Context().com.barrier()
def fileHasVariable(filename, varname):
"""Returns ``True`` if the :file:`.nc` file `filename` contains an attribute named `varname`."""
    ds = PISM.netCDF.Dataset(filename)
    try:
        return varname in ds.variables
    finally:
        ds.close()
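# Usage sketch (illustrative only; assumes an existing PISM-produced NetCDF
# file named 'output.nc' and an initialized PISM context):
#
#   writeProvenance('output.nc', message='postprocessed by my_script.py')
#   if fileHasVariable('output.nc', 'thk'):
#       print('ice thickness is present')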
# The following was copied from matplotlib, which copied a python recipe.
class Bunch(object):
"""
Often we want to just collect a bunch of stuff together, naming each
    item of the bunch; a dictionary's OK for that, but a small do-nothing
class is even handier, and prettier to use. Whenever you want to
group a few variables:
>>> point = Bunch(datum=2, squared=4, coord=12)
>>> point.datum
By: Alex Martelli
From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52308"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
def has_key(self, k):
"Return True if this Bunch has a given key."
        return k in self.__dict__
def __getitem__(self, k):
return self.__dict__.get(k)
def update(self, **kwds):
"Update contents of a Bunch using key-value pairs."
self.__dict__.update(**kwds)
def __repr__(self):
keys = self.__dict__.keys()
return 'Bunch(%s)' % ', '.join(['%s=%s' % (k, self.__dict__[k]) for k in keys])
|
talbrecht/pism_pik07
|
site-packages/PISM/util.py
|
Python
|
gpl-3.0
| 2,829
|
[
"NetCDF"
] |
1fa5c733718f12ee3092a338eec85fcddda0ecc0362fe92a4701e5cc2b5b9ac8
|
def read_aims(filename):
"""Import FHI-aims geometry type files.
Reads unitcell, atom positions and constraints from
a geometry.in file.
"""
from ase import Atoms
from ase.constraints import FixAtoms, FixCartesian
import numpy as np
atoms = Atoms()
fd = open(filename, 'r')
lines = fd.readlines()
fd.close()
positions = []
cell = []
symbols = []
magmoms = []
fix = []
fix_cart = []
xyz = np.array([0, 0, 0])
i = -1
n_periodic = -1
periodic = np.array([False, False, False])
for n, line in enumerate(lines):
inp = line.split()
if inp == []:
continue
if inp[0] == 'atom':
if xyz.all():
fix.append(i)
elif xyz.any():
fix_cart.append(FixCartesian(i, xyz))
floatvect = float(inp[1]), float(inp[2]), float(inp[3])
positions.append(floatvect)
symbols.append(inp[-1])
i += 1
xyz = np.array([0, 0, 0])
elif inp[0] == 'lattice_vector':
floatvect = float(inp[1]), float(inp[2]), float(inp[3])
cell.append(floatvect)
n_periodic = n_periodic + 1
periodic[n_periodic] = True
elif inp[0] == 'initial_moment':
magmoms.append(float(inp[1]))
if inp[0] == 'constrain_relaxation':
if inp[1] == '.true.':
fix.append(i)
elif inp[1] == 'x':
xyz[0] = 1
elif inp[1] == 'y':
xyz[1] = 1
elif inp[1] == 'z':
xyz[2] = 1
if xyz.all():
fix.append(i)
elif xyz.any():
fix_cart.append(FixCartesian(i, xyz))
atoms = Atoms(symbols, positions)
if len(magmoms) > 0:
atoms.set_initial_magnetic_moments(magmoms)
if periodic.any():
atoms.set_cell(cell)
atoms.set_pbc(periodic)
if len(fix):
atoms.set_constraint([FixAtoms(indices=fix)]+fix_cart)
else:
atoms.set_constraint(fix_cart)
return atoms
def write_aims(filename, atoms, ghosts=None):
"""Method to write FHI-aims geometry files.
Writes the atoms positions and constraints (only FixAtoms is
supported at the moment).
"""
from ase.constraints import FixAtoms, FixCartesian
import numpy as np
if isinstance(atoms, (list, tuple)):
if len(atoms) > 1:
raise RuntimeError("Don't know how to save more than "+
"one image to FHI-aims input")
else:
atoms = atoms[0]
fd = open(filename, 'w')
fd.write('#=======================================================\n')
fd.write('#FHI-aims file: '+filename+'\n')
fd.write('#Created using the Atomic Simulation Environment (ASE)\n')
fd.write('#=======================================================\n')
i = 0
if atoms.get_pbc().any():
for n, vector in enumerate(atoms.get_cell()):
fd.write('lattice_vector ')
for i in range(3):
fd.write('%16.16f ' % vector[i])
fd.write('\n')
fix_cart = np.zeros([len(atoms),3])
if atoms.constraints:
for constr in atoms.constraints:
if isinstance(constr, FixAtoms):
fix_cart[constr.index] = [1,1,1]
elif isinstance(constr, FixCartesian):
fix_cart[constr.a] = -constr.mask+1
if ghosts is None:
ghosts = np.zeros(len(atoms))
else:
assert len(ghosts) == len(atoms)
for i, atom in enumerate(atoms):
if ghosts[i] == 1:
atomstring = 'empty '
else:
atomstring = 'atom '
fd.write(atomstring)
for pos in atom.position:
fd.write('%16.16f ' % pos)
fd.write(atom.symbol)
fd.write('\n')
# (1) all coords are constrained:
if fix_cart[i].all():
fd.write('constrain_relaxation .true.\n')
# (2) some coords are constrained:
elif fix_cart[i].any():
xyz = fix_cart[i]
for n in range(3):
if xyz[n]:
fd.write('constrain_relaxation %s\n' % 'xyz'[n])
if atom.charge:
fd.write('initial_charge %16.6f\n' % atom.charge)
if atom.magmom:
fd.write('initial_moment %16.6f\n' % atom.magmom)
# except KeyError:
# continue
def read_energy(filename):
for line in open(filename, 'r'):
if line.startswith(' | Total energy corrected'):
E = float(line.split()[-2])
return E
def read_aims_output(filename, index = -1):
""" Import FHI-aims output files with all data available, i.e. relaxations,
MD information, force information etc etc etc. """
from ase import Atoms, Atom
from ase.calculators.singlepoint import SinglePointCalculator
from ase.units import Ang, fs
from ase.constraints import FixAtoms, FixCartesian
molecular_dynamics = False
fd = open(filename, 'r')
cell = []
images = []
fix = []
fix_cart = []
n_periodic = -1
f = None
pbc = False
found_aims_calculator = False
v_unit = Ang/(1000.0*fs)
while True:
line = fd.readline()
if not line:
break
if "List of parameters used to initialize the calculator:" in line:
fd.readline()
calc = read_aims_calculator(fd)
calc.out = filename
found_aims_calculator = True
if "| Number of atoms :" in line:
inp = line.split()
n_atoms = int(inp[5])
if "| Unit cell:" in line:
if not pbc:
pbc = True
for i in range(3):
inp = fd.readline().split()
cell.append([inp[1],inp[2],inp[3]])
if "Found relaxation constraint for atom" in line:
xyz = [0, 0, 0]
ind = int(line.split()[5][:-1])-1
if "All coordinates fixed" in line:
if ind not in fix:
fix.append(ind)
if "coordinate fixed" in line:
coord = line.split()[6]
constr_ind = 0
if coord == 'x':
xyz[0] = 1
elif coord == 'y':
xyz[1] = 1
elif coord == 'z':
xyz[2] = 1
keep = True
for n,c in enumerate(fix_cart):
if ind == c.a:
keep = False
constr_ind = n
if keep:
fix_cart.append(FixCartesian(ind, xyz))
else:
                    fix_cart[constr_ind].mask[xyz.index(1)] = 0
if "Atomic structure:" in line and not molecular_dynamics:
fd.readline()
atoms = Atoms()
for i in range(n_atoms):
inp = fd.readline().split()
atoms.append(Atom(inp[3],(inp[4],inp[5],inp[6])))
if "Complete information for previous time-step:" in line:
molecular_dynamics = True
if "Updated atomic structure:" in line and not molecular_dynamics:
fd.readline()
atoms = Atoms()
velocities = []
for i in range(n_atoms):
inp = fd.readline().split()
if 'lattice_vector' in inp[0]:
cell = []
for i in range(3):
cell += [[float(inp[1]),float(inp[2]),float(inp[3])]]
inp = fd.readline().split()
atoms.set_cell(cell)
inp = fd.readline().split()
atoms.append(Atom(inp[4],(inp[1],inp[2],inp[3])))
if molecular_dynamics:
inp = fd.readline().split()
if "Atomic structure (and velocities)" in line:
fd.readline()
atoms = Atoms()
velocities = []
for i in range(n_atoms):
inp = fd.readline().split()
atoms.append(Atom(inp[4],(inp[1],inp[2],inp[3])))
inp = fd.readline().split()
velocities += [[float(inp[1])*v_unit,float(inp[2])*v_unit,float(inp[3])*v_unit]]
atoms.set_velocities(velocities)
if len(fix):
atoms.set_constraint([FixAtoms(indices=fix)]+fix_cart)
else:
atoms.set_constraint(fix_cart)
images.append(atoms)
if "Total atomic forces" in line:
f = []
for i in range(n_atoms):
inp = fd.readline().split()
f.append([float(inp[2]),float(inp[3]),float(inp[4])])
if not found_aims_calculator:
e = images[-1].get_potential_energy()
images[-1].set_calculator(SinglePointCalculator(atoms,
energy=e,
forces=f))
e = None
f = None
if "Total energy corrected" in line:
e = float(line.split()[5])
if pbc:
atoms.set_cell(cell)
atoms.pbc = True
if not found_aims_calculator:
atoms.set_calculator(SinglePointCalculator(atoms, energy=e))
if not molecular_dynamics:
if len(fix):
atoms.set_constraint([FixAtoms(indices=fix)]+fix_cart)
else:
atoms.set_constraint(fix_cart)
images.append(atoms)
e = None
if found_aims_calculator:
calc.set_results(images[-1])
images[-1].set_calculator(calc)
fd.close()
if molecular_dynamics:
images = images[1:]
# return requested images, code borrowed from ase/io/trajectory.py
if isinstance(index, int):
return images[index]
else:
step = index.step or 1
if step > 0:
start = index.start or 0
if start < 0:
start += len(images)
stop = index.stop or len(images)
if stop < 0:
stop += len(images)
else:
if index.start is None:
start = len(images) - 1
else:
start = index.start
if start < 0:
start += len(images)
if index.stop is None:
stop = -1
else:
stop = index.stop
if stop < 0:
stop += len(images)
return [images[i] for i in range(start, stop, step)]
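# Minimal usage sketch (not part of the original module); 'aims.out' is a
# hypothetical file name:
#
#   from ase.io.aims import read_aims_output
#   final_atoms = read_aims_output('aims.out', index=-1)             # last image
#   trajectory = read_aims_output('aims.out', index=slice(0, None))  # all images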
|
suttond/MODOI
|
ase/io/aims.py
|
Python
|
lgpl-3.0
| 10,742
|
[
"ASE",
"FHI-aims"
] |
5c29aeaa58cfb7ca4eda16ba3cf150d669037f70f3f58a6720661b26c210f59e
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf import gto, lib
from pyscf import scf
from pyscf.prop import nmr
from pyscf.data import nist
nist.ALPHA = 1./137.03599967994
mol = gto.Mole()
mol.verbose = 7
mol.output = '/dev/null'
mol.atom = [
[1 , (0. , 0. , .917)],
["F" , (0. , 0. , 0.)], ]
#mol.nucmod = {"F":2, "H":2}
mol.basis = {"H": 'cc_pvdz',
"F": 'cc_pvdz',}
mol.build()
nrhf = scf.RHF(mol)
nrhf.conv_tol_grad = 1e-6
nrhf.conv_tol = 1e-12
nrhf.scf()
rhf = scf.dhf.RHF(mol)
rhf.conv_tol_grad = 1e-7
rhf.conv_tol = 1e-12
rhf.scf()
def finger(mat):
return abs(mat).sum()
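# Standalone sketch of a single shielding calculation (mirrors the tests below
# and reuses the mean-field objects defined above); finger() simply reduces a
# tensor to the sum of its absolute values for comparison against reference data:
#
#   m = nmr.RHF(nrhf)
#   m.cphf = True          # solve the coupled-perturbed SCF equations
#   m.gauge_orig = None    # None selects GIAO; a 3-tuple sets a common gauge origin
#   msc = m.shielding()    # one 3x3 shielding tensor per atom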
class KnowValues(unittest.TestCase):
def test_nr_common_gauge_ucpscf(self):
m = nmr.RHF(nrhf)
m.cphf = False
m.gauge_orig = (1,1,1)
msc = m.shielding()
self.assertAlmostEqual(finger(msc), 1636.7413165636394, 5)
def test_nr_common_gauge_cpscf(self):
m = nmr.RHF(nrhf)
m.cphf = True
m.gauge_orig = (1,1,1)
msc = m.shielding()
self.assertAlmostEqual(finger(msc), 1562.3859473764551, 5)
def test_nr_giao_ucpscf(self):
m = nmr.RHF(nrhf)
m.cphf = False
m.gauge_orig = None
msc = m.shielding()
self.assertAlmostEqual(finger(msc), 1488.0948832784416, 5)
def test_nr_giao_cpscf(self):
m = nmr.RHF(nrhf)
m.cphf = True
m.gauge_orig = None
msc = m.shielding()
self.assertAlmostEqual(finger(msc), 1358.9826064972372, 5)
def test_rmb_common_gauge_ucpscf(self):
m = nmr.DHF(rhf)
m.cphf = False
m.gauge_orig = (1,1,1)
msc = m.shielding()
self.assertAlmostEqual(finger(msc), 1642.1872658333457, 4)
def test_rmb_common_gauge_cpscf(self):
m = nmr.DHF(rhf)
m.cphf = True
m.gauge_orig = (1,1,1)
msc = m.shielding()
self.assertAlmostEqual(finger(msc), 1569.0400642905215, 4)
def test_rmb_giao_ucpscf(self):
m = nmr.DHF(rhf)
m.cphf = False
m.gauge_orig = None
msc = m.shielding()
self.assertAlmostEqual(finger(msc), 1493.7229929087348, 4)
def test_rmb_giao_cpscf_high_cost(self):
m = nmr.DHF(rhf)
m.cphf = True
m.gauge_orig = None
msc = m.shielding()
self.assertAlmostEqual(finger(msc), 1365.4679007423506, 4)
def test_rkb_giao_cpscf(self):
m = nmr.DHF(rhf)
m.mb = 'RKB'
m.cphf = True
m.gauge_orig = None
msc = m.shielding()
self.assertAlmostEqual(finger(msc), 1923.9092803444623, 4)
def test_rkb_common_gauge_cpscf(self):
m = nmr.DHF(rhf)
m.mb = 'RKB'
m.cphf = True
m.gauge_orig = (1,1,1)
msc = m.shielding()
self.assertAlmostEqual(finger(msc), 1980.1179936444073, 4)
def test_make_h10(self):
nao = mol.nao_nr()
numpy.random.seed(1)
dm0 = numpy.random.random((nao,nao))
dm0 = dm0 + dm0.T
h1 = nmr.rhf.make_h10(mol, dm0)
self.assertAlmostEqual(numpy.linalg.norm(h1), 21.255203821714673, 8)
h1 = nmr.rhf.make_h10(mol, dm0, gauge_orig=(0,0,0))
self.assertAlmostEqual(numpy.linalg.norm(h1), 4.020198783142229, 8)
n4c = mol.nao_2c()*2
numpy.random.seed(1)
dm0 = numpy.random.random((n4c,n4c))
dm0 = dm0 + dm0.T.conj()
h1 = nmr.dhf.make_h10(mol, dm0)
self.assertAlmostEqual(numpy.linalg.norm(h1), 73.452535645731714, 8)
h1 = nmr.dhf.make_h10(mol, dm0, gauge_orig=(0,0,0), mb='RKB')
self.assertAlmostEqual(numpy.linalg.norm(h1), 7.3636964305440609, 8)
if __name__ == "__main__":
print("Full Tests of RHF-MSC DHF-MSC for HF")
unittest.main()
|
gkc1000/pyscf
|
pyscf/prop/nmr/test/test_hf_msc.py
|
Python
|
apache-2.0
| 4,358
|
[
"PySCF"
] |
648d2591875b85bf00a910cb63dff322104e839c4d822fef0de6bd13180c9c21
|
#
# Copyright (C) 2004-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
import argparse
import csv
import os
import sys
from rdkit import Chem
def Convert(suppl, outFile, keyCol=None, stopAfter=-1, includeChirality=False, smilesFrom=''):
w = csv.writer(outFile)
mol = suppl[0]
propNames = list(mol.GetPropNames())
if keyCol and keyCol in propNames:
propNames.remove(keyCol)
outL = []
if keyCol:
outL.append(keyCol)
outL.append('SMILES')
outL.extend(propNames)
w.writerow(outL)
for nDone, mol in enumerate(suppl, 1):
if not mol:
continue
if not smilesFrom or not mol.HasProp(smilesFrom):
smi = Chem.MolToSmiles(mol, isomericSmiles=includeChirality)
else:
smi = mol.GetProp(smilesFrom)
tMol = Chem.MolFromSmiles(smi)
smi = Chem.MolToSmiles(tMol, isomericSmiles=includeChirality)
outL = []
if keyCol:
outL.append(str(mol.GetProp(keyCol)))
outL.append(smi)
for prop in propNames:
if mol.HasProp(prop):
outL.append(str(mol.GetProp(prop)))
else:
outL.append('')
w.writerow(outL)
if nDone == stopAfter:
break
return
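# Illustrative programmatic usage (file and property names are examples only):
#
#   from rdkit import Chem
#   suppl = Chem.SDMolSupplier('molecules.sdf')
#   with open('molecules.csv', 'w') as outF:
#       Convert(suppl, outF, keyCol='Name', includeChirality=True)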
def initParser():
""" Initialize the parser for the CLI """
parser = argparse.ArgumentParser(description='Convert SDF file to CSV',
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--key', '-k', metavar='keyCol', default=None, dest='keyCol')
parser.add_argument('--chiral', default=False, action='store_true', dest='useChirality')
parser.add_argument('--smilesCol', metavar='smilesCol', default='')
parser.add_argument('inFilename', metavar='inFile.sdf', type=existingFile)
parser.add_argument('outF', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
return parser
def existingFile(filename):
""" 'type' for argparse - check that filename exists """
if not os.path.exists(filename):
raise argparse.ArgumentTypeError("{0} does not exist".format(filename))
return filename
def main():
""" Main application """
parser = initParser()
args = parser.parse_args()
suppl = Chem.SDMolSupplier(args.inFilename)
Convert(suppl, args.outF, keyCol=args.keyCol, includeChirality=args.useChirality,
smilesFrom=args.smilesCol)
if __name__ == '__main__':
main()
|
rvianello/rdkit
|
rdkit/Chem/ChemUtils/SDFToCSV.py
|
Python
|
bsd-3-clause
| 2,530
|
[
"RDKit"
] |
52edacc2c7f65f8ab8c0f69663c5c3dcade3fe2917f0dd5652bf3fc827ea6060
|
class ThreatExchange(object):
"""
General vocabulary for ThreatExchange.
"""
URL = 'https://graph.facebook.com/'
VERSION = 'v2.6/'
ACCESS_TOKEN = 'access_token'
DEFAULT_LIMIT = 25
# GET
MALWARE_ANALYSES = 'malware_analyses/'
MALWARE_FAMILIES = 'malware_families/'
THREAT_EXCHANGE_MEMBERS = 'threat_exchange_members/'
THREAT_DESCRIPTORS = 'threat_descriptors/'
THREAT_INDICATORS = 'threat_indicators/'
THREAT_PRIVACY_GROUPS = 'threat_privacy_groups/'
THREAT_PRIVACY_GROUPS_MEMBER = 'threat_privacy_groups_member/'
THREAT_PRIVACY_GROUPS_OWNER = 'threat_privacy_groups_owner/'
THREAT_TAGS = 'threat_tags/'
FIELDS = 'fields'
INCLUDE_EXPIRED = 'include_expired'
LIMIT = 'limit'
MAX_CONFIDENCE = 'max_confidence'
MIN_CONFIDENCE = 'min_confidence'
OWNER = 'owner'
REVIEW_STATUS = 'review_status'
SAMPLE_TYPE = 'sample_type'
SHARE_LEVEL = 'share_level'
SINCE = 'since'
SORT_ORDER = 'sort_order'
STATUS = 'status'
STRICT_TEXT = 'strict_text'
TEXT = 'text'
THREAT_TYPE = 'threat_type'
TYPE = 'type'
UNTIL = 'until'
DATA = 'data'
PAGING = 'paging'
NEXT = 'next'
ASCENDING = 'ASCENDING'
DESCENDING = 'DESCENDING'
METADATA = 'metadata'
NO_TOTAL = -1
MIN_TOTAL = 0
DEC_TOTAL = 1
# POST
REACTIONS = 'reactions'
RELATED = 'related'
RELATED_ID = 'related_id'
# BATCH
BATCH = 'batch'
INCLUDE_HEADERS = 'include_headers'
OMIT_RESPONSE_ON_SUCCESS = 'omit_response_on_success'
# Environment Variables for init()
TX_ACCESS_TOKEN = 'TX_ACCESS_TOKEN'
TX_APP_ID = 'TX_APP_ID'
TX_APP_SECRET = 'TX_APP_SECRET'
class Batch(object):
"""
Vocabulary used for batch operations.
"""
METHOD = 'method'
RELATIVE_URL = 'relative_url'
BODY = 'body'
INCLUDE_HEADERS = 'include_headers'
class Common(object):
"""
Vocabulary common to multiple objects.
"""
ADDED_ON = 'added_on'
ID = 'id'
METADATA = 'metadata'
MY_REACTIONS = 'my_reactions'
SHARE_LEVEL = 'share_level'
STATUS = 'status'
VICTIM_COUNT = 'victim_count'
class Connection(object):
"""
Vocabulary specific to searching for, creating, or removing connections
between objects.
"""
ADDED_ON = Common.ADDED_ON
CRX = 'crx'
DESCRIPTORS = 'descriptors'
DROPPED = 'dropped'
DROPPED_BY = 'dropped_by'
FAMILIES = 'families'
ID = Common.ID
MALWARE_ANALYSES = 'malware_analyses'
RELATED = 'related'
STATUS = Common.STATUS
THREAT_INDICATORS = 'threat_indicators'
VARIANTS = 'variants'
VICTIM_COUNT = Common.VICTIM_COUNT
class Malware(object):
"""
Vocabulary specific to searching for, creating, or modifying a Malware
object.
"""
ADDED_ON = Common.ADDED_ON
CRX = 'crx'
ID = Common.ID
IMPHASH = 'imphash'
MD5 = 'md5'
METADATA = Common.METADATA
PASSWORD = 'password'
PE_RICH_HEADER = 'pe_rich_header'
PRIVACY_TYPE = 'privacy_type'
SAMPLE = 'sample'
SAMPLE_SIZE = 'sample_size'
SAMPLE_SIZE_COMPRESSED = 'sample_size_compressed'
SAMPLE_TYPE = 'sample_type'
SHA1 = 'sha1'
SHA256 = 'sha256'
SHARE_LEVEL = Common.SHARE_LEVEL
SSDEEP = 'ssdeep'
STATUS = Common.STATUS
TAGS = 'tags'
VICTIM_COUNT = Common.VICTIM_COUNT
XPI = 'xpi'
class MalwareAnalysisTypes(object):
"""
Vocabulary specific to Malware Analysis Sample Types
"""
ANDROID_APK = 'ANDROID_APK'
CHROME_EXT = 'CHROME_EXT'
DALVIK_DEX = 'DALVIK_DEX'
ELF_X64 = 'ELF_X64'
ELF_X86 = 'ELF_X86'
FIREFOX_EXT = 'FIREFOX_EXT'
FLASH_DATA = 'FLASH_DATA'
FLASH_VIDEO = 'FLASH_VIDEO'
GENERIC_BINARY = 'GENERIC_BINARY'
GENERIC_IMAGE = 'GENERIC_IMAGE'
GENERIC_TEXT = 'GENERIC_TEXT'
HTML = 'HTML'
IMAGE_BMP = 'IMAGE_BMP'
IMAGE_GIF = 'IMAGE_GIF'
IMAGE_JPEG = 'IMAGE_JPEG'
IMAGE_PNG = 'IMAGE_PNG'
IMAGE_TIFF = 'IMAGE_TIFF'
IOS_APP = 'IOS_APP'
JAR_ARCHIVE = 'JAR_ARCHIVE'
JAVASCRIPT = 'JAVASCRIPT'
MACH_O = 'MACH_O'
OFFICE_DOCX = 'OFFICE_DOCX'
OFFICE_PPTX = 'OFFICE_PPTX'
OFFICE_XLSX = 'OFFICE_XLSX'
PE_X64 = 'PE_X64'
PE_X86 = 'PE_X86'
PDF_DOCUMENT = 'PDF_DOCUMENT'
RAR_ARCHIVE = 'RAR_ARCHIVE'
RTF_FILE = 'RTF_FILE'
UNKNOWN = 'UNKNOWN'
ZIP_ARCHIVE = 'ZIP_ARCHIVE'
class MalwareFamilies(object):
"""
Vocabulary specific to searching for Malware Family objects.
"""
ADDED_ON = Common.ADDED_ON
ALIASES = 'aliases'
DESCRIPTION = 'description'
FAMILY_TYPE = 'family_type'
ID = Common.ID
MALICIOUS = 'malicious'
NAME = 'name'
PRIVACY_TYPE = 'privacy_type'
SAMPLE_COUNT = 'sample_count'
SHARE_LEVEL = Common.SHARE_LEVEL
class Paging(object):
"""
Vocabulary for the fields available in a GET response specific to paging.
"""
PAGING = 'paging'
CURSORS = 'cursors'
NEXT = 'next'
class PagingCursor(object):
"""
Vocabulary for describing the paging cursor in a GET response.
"""
BEFORE = 'before'
AFTER = 'after'
class Reaction(object):
"""
Vocabulary for describing a reaction.
"""
HELPFUL = 'HELPFUL'
NOT_HELPFUL = 'NOT_HELPFUL'
OUTDATED = 'OUTDATED'
SAW_THIS_TOO = 'SAW_THIS_TOO'
WANT_MORE_INFO = 'WANT_MORE_INFO'
class Response(object):
"""
Vocabulary for describing server responses.
"""
SUCCESS = 'success'
ID = 'id'
ERROR = 'error'
MESSAGE = 'message'
TYPE = 'type'
CODE = 'code'
FBTRACE_ID = 'fbtrace_id'
class ThreatExchangeMember(object):
"""
Vocabulary for describing a ThreatExchangeMember.
"""
ID = Common.ID
NAME = 'name'
EMAIL = 'email'
class ThreatPrivacyGroup(object):
"""
Vocabulary for describing a ThreatPrivacyGroup.
"""
ID = Common.ID
NAME = 'name'
DESCRIPTION = 'description'
MEMBERS = 'members'
MEMBERS_CAN_SEE = 'members_can_see'
MEMBERS_CAN_USE = 'members_can_use'
class ThreatIndicator(object):
"""
Vocabulary specific to searching for, adding, or modifying a Threat
Indicator object.
"""
ID = Common.ID
INDICATOR = 'indicator'
METADATA = Common.METADATA
TYPE = 'type'
class ThreatDescriptor(object):
"""
Vocabulary specific to searching for, adding, or modifying a Threat
Indicator object.
"""
ADDED_ON = 'added_on'
ATTACK_TYPE = 'attack_type'
CONFIDENCE = 'confidence'
DESCRIPTION = 'description'
EXPIRED_ON = 'expired_on'
ID = Common.ID
INDICATOR = 'indicator'
LAST_UPDATED = 'last_updated'
METADATA = Common.METADATA
MY_REACTIONS = Common.MY_REACTIONS
OWNER = 'owner'
PRECISION = 'precision'
PRIVACY_MEMBERS = 'privacy_members'
PRIVACY_TYPE = 'privacy_type'
RAW_INDICATOR = 'raw_indicator'
REVIEW_STATUS = 'review_status'
SEVERITY = 'severity'
SHARE_LEVEL = Common.SHARE_LEVEL
SOURCE_URI = 'source_uri'
STATUS = Common.STATUS
TAGS = 'tags'
THREAT_TYPE = 'threat_type'
TYPE = 'type'
class ThreatTag(object):
"""
Vocabulary specific to searching for, adding, or modifying a Threat
Tag object.
"""
ID = Common.ID
OBJECTS = 'objects'
TAGGED_OBJECTS = 'tagged_objects'
TEXT = 'text'
class Attack(object):
"""
Vocabulary for the Threat Indicator Attack type.
"""
ACCESS_TOKEN_THEFT = 'ACCESS_TOKEN_THEFT'
BOGON = 'BOGON'
BOT = 'BOT'
BRUTE_FORCE = 'BRUTE_FORCE'
CLICKJACKING = 'CLICKJACKING'
COMPROMISED = 'COMPROMISED'
CREEPER = 'CREEPER'
DRUGS = 'DRUGS'
EMAIL_SPAM = 'EMAIL_SPAM'
EXPLICIT_CONTENT = 'EXPLICIT_CONTENT'
EXPLOIT_KIT = 'EXPLOIT_KIT'
FAKE_ACCOUNTS = 'FAKE_ACCOUNT'
FINANCIALS = 'FINANCIAL'
IP_INFRINGEMENT = 'IP_INFRINGEMENT'
MALICIOUS_APP = 'MALICIOUS_APP'
MALICIOUS_NAMESERVER = 'MALICIOUS_NAMESERVER'
MALICIOUS_WEBSERVER = 'MALICIOUS_WEBSERVER'
MALVERTISING = 'MALVERTISING'
MALWARE = 'MALWARE'
PASSIVE_DNS = 'PASSIVE_DNS'
PHISHING = 'PHISHING'
PIRACY = 'PIRACY'
PROXY = 'PROXY'
SCAM = 'SCAM'
SCANNING = 'SCANNING'
SCRAPING = 'SCRAPING'
SELF_XSS = 'SELF_XSS'
SHARE_BAITING = 'SHARE_BAITING'
TARGETED = 'TARGETED'
TERRORISM = 'TERRORISM'
WEAPONS = 'WEAPONS'
WEB_APP = 'WEB_APP'
UNKNOWN = 'UNKNOWN'
class MalwareFamily(object):
"""
Vocabulary for the Malware Family Type.
"""
AVSCAN = 'AVSCAN'
AV_SIGNATURE = 'AV_SIGNATURE'
BARF10 = 'BARF10'
FSH_HTML = 'FSH_HTML'
FSH_SSDEEP = 'FSH_SSDEEP'
IMP_HASH = 'IMP_HASH'
JS004 = 'JS004'
JS005 = 'JS005'
MANUAL = 'MANUAL'
PE_CERT_SHA256 = 'PE_CERT_SHA256'
PE_EXPORT = 'PE_EXPORT'
PE_RSRC_SHA256 = 'PE_RSRC_SHA256'
PE_SECTION_SHA256 = 'PE_SECTION_SHA256'
PE_TIMESTAMP = 'PE_TIMESTAMP'
PE_VERSION_VALUE = 'PE_VERSION_VALUE'
RICH_HEADER_HASH = 'RICH_HEADER_HASH'
SSDEEP_HASH = 'SSDEEP_HASH'
UNKNOWN = 'UNKNOWN'
YARA = 'YARA'
class Precision(object):
"""
Vocabulary for the Precision Type.
"""
UNKNOWN = 'UNKNOWN'
LOW = 'LOW'
MEDIUM = 'MEDIUM'
HIGH = 'HIGH'
class PrivacyType(object):
"""
Vocabulary for the Threat Indicator Privacy Type.
"""
HAS_PRIVACY_GROUP = 'HAS_PRIVACY_GROUP'
HAS_WHITELIST = 'HAS_WHITELIST'
NONE = 'NONE'
VISIBLE = 'VISIBLE'
class ReviewStatus(object):
"""
Vocabulary for the Review Status Type.
"""
UNKNOWN = 'UNKNOWN'
UNREVIEWED = 'UNREVIEWED'
PENDING = 'PENDING'
REVIEWED_MANUALLY = 'REVIEWED_MANUALLY'
REVIEWED_AUTOMATICALLY = 'REVIEWED_AUTOMATICALLY'
class Role(object):
"""
Vocabulary for the Threat Indicator Role type.
"""
BENEFACTOR = 'BENEFACTOR'
C2 = 'C2'
EXPLOIT = 'EXPLOIT'
RECON = 'RECON'
PHISHING_SITE = 'PHISHING_SITE'
TRACKING_PIXEL = 'TRACKING_PIXEL'
UNKNOWN = 'UNKNOWN'
WATERING_HOLE = 'WATERING_HOLE'
class Severity(object):
"""
Vocabulary for the available severity levels for a Threat Indicator.
Intentionally out of alphabetical order to reflect order of severity.
"""
UNKNOWN = 'UNKNOWN'
INFO = 'INFO'
WARNING = 'WARNING'
SUSPICIOUS = 'SUSPICIOUS'
SEVERE = 'SEVERE'
APOCALYPSE = 'APOCALYPSE'
class ShareLevel(object):
"""
Vocabulary for the share level of an object. This is based off of TLP.
"""
UNKNOWN = 'UNKNOWN'
WHITE = 'WHITE'
GREEN = 'GREEN'
AMBER = 'AMBER'
RED = 'RED'
class SignatureType(object):
"""
Vocabulary for the Threat Indicator Signature Threat Type.
"""
BRO = 'BRO'
REGEX_URL = 'REGEX_URL'
SNORT = 'SNORT'
SURICATA = 'SURICATA'
UNKNOWN = 'UNKNOWN'
YARA = 'YARA'
class Status(object):
"""
Vocabulary for the status of an object.
"""
MALICIOUS = 'MALICIOUS'
NON_MALICIOUS = 'NON_MALICIOUS'
SUSPICIOUS = 'SUSPICIOUS'
UNKNOWN = 'UNKNOWN'
class ThreatType(object):
"""
Vocabulary for the available Threat Types for a Threat Indicator.
"""
BAD_ACTOR = 'BAD_ACTOR'
COMPROMISED_CREDENTIAL = 'COMPROMISED_CREDENTIAL'
COMMAND_EXEC = 'COMMAND_EXEC'
HT_VICTIM = 'HT_VICTIM'
MALICIOUS_AD = 'MALICIOUS_AD'
MALICIOUS_API_KEY = 'MALICIOUS_API_KEY'
MALICIOUS_CONTENT = 'MALICIOUS_CONTENT'
MALICIOUS_DOMAIN = 'MALICIOUS_DOMAIN'
MALICIOUS_INJECT = 'MALICIOUS_INJECT'
MALICIOUS_IP = 'MALICIOUS_IP'
MALICIOUS_SSL_CERT = 'MALICIOUS_SSL_CERT'
MALICIOUS_SUBNET = 'MALICIOUS_SUBNET'
MALICIOUS_URL = 'MALICIOUS_URL'
MALICIOUS_URL_CHUNK = 'MALICIOUS_URL_CHUNK'
MALWARE_ARTIFACTS = 'MALWARE_ARTIFACTS'
MALWARE_SAMPLE = 'MALWARE_SAMPLE'
MALWARE_SIGNATURE = 'MALWARE_SIGNATURE'
MALWARE_VICTIM = 'MALWARE_VICTIM'
PROXY_IP = 'PROXY_IP'
SIGNATURE = 'SIGNATURE'
SINKHOLE_EVENT = 'SINKHOLE_EVENT'
SMS_SPAM = 'SMS_SPAM'
UNKNOWN = 'UNKNOWN'
VICTIM_IP_USAGE = 'VICTIM_IP_USAGE'
WEB_REQUEST = 'WEB_REQUEST'
WHITELIST_DOMAIN = 'WHITELIST_DOMAIN'
WHITELIST_IP = 'WHITELIST_IP'
WHITELIST_URL = 'WHITELIST_URL'
class Types(object):
"""
Vocabulary for the Threat Indicator Types.
"""
ADJUST_TOKEN = 'ADJUST_TOKEN'
API_KEY = 'API_KEY'
AS_NUMBER = 'AS_NUMBER'
BANNER = 'BANNER'
CMD_LINE = 'CMD_LINE'
COOKIE_NAME = 'COOKIE_NAME'
CRX = 'CRX'
DEBUG_STRING = 'DEBUG_STRING'
DEST_PORT = 'DEST_PORT'
DIRECTORY_QUERIED = 'DIRECTORY_QUERIED'
DOMAIN = 'DOMAIN'
EMAIL_ADDRESS = 'EMAIL_ADDRESS'
FILE_CREATED = 'FILE_CREATED'
FILE_DELETED = 'FILE_DELETED'
FILE_MOVED = 'FILE_MOVED'
FILE_NAME = 'FILE_NAME'
FILE_OPENED = 'FILE_OPENED'
FILE_READ = 'FILE_READ'
FILE_WRITTEN = 'FILE_WRITTEN'
GET_PARAM = 'GET_PARAM'
HASH_IMPHASH = 'HASH_IMPHASH'
HASH_MD5 = 'HASH_MD5'
HASH_SHA1 = 'HASH_SHA1'
HASH_SHA256 = 'HASH_SHA256'
HASH_SSDEEP = 'HASH_SSDEEP'
HTML_ID = 'HTML_ID'
HTTP_REQUEST = 'HTTP_REQUEST'
IP_ADDRESS = 'IP_ADDRESS'
IP_SUBNET = 'IP_SUBNET'
ISP = 'ISP'
LATITUDE = 'LATITUDE'
LAUNCH_AGENT = 'LAUNCH_AGENT'
LOCATION = 'LOCATION'
LONGITUDE = 'LONGITUDE'
MALWARE_NAME = 'MALWARE_NAME'
MEMORY_ALLOC = 'MEMORY_ALLOC'
MEMORY_PROTECT = 'MEMORY_PROTECT'
MEMORY_WRITTEN = 'MEMORY_WRITTEN'
MUTANT_CREATED = 'MUTANT_CREATED'
MUTEX = 'MUTEX'
NAME_SERVER = 'NAME_SERVER'
OTHER_FILE_OP = 'OTHER_FILE_OP'
PASSWORD = 'PASSWORD'
PASSWORD_SALT = 'PASSWORD_SALT'
PAYLOAD_DATA = 'PAYLOAD_DATA'
PAYLOAD_TYPE = 'PAYLOAD_TYPE'
POST_DATA = 'POST_DATA'
PROTOCOL = 'PROTOCOL'
REFERER = 'REFERER'
REGISTRAR = 'REGISTRAR'
REGISTRY_KEY = 'REGISTRY_KEY'
REG_KEY_CREATED = 'REG_KEY_CREATED'
REG_KEY_DELETED = 'REG_KEY_DELETED'
REG_KEY_ENUMERATED = 'REG_KEY_ENUMERATED'
REG_KEY_MONITORED = 'REG_KEY_MONITORED'
REG_KEY_OPENED = 'REG_KEY_OPENED'
REG_KEY_VALUE_CREATED = 'REG_KEY_VALUE_CREATED'
REG_KEY_VALUE_DELETED = 'REG_KEY_VALUE_DELETED'
REG_KEY_VALUE_MODIFIED = 'REG_KEY_VALUE_MODIFIED'
REG_KEY_VALUE_QUERIED = 'REG_KEY_VALUE_QUERIED'
SIGNATURE = 'SIGNATURE'
SOURCE_PORT = 'SOURCE_PORT'
TELEPHONE = 'TELEPHONE'
URI = 'URI'
USER_AGENT = 'USER_AGENT'
VOLUME_QUERIED = 'VOLUME_QUERIED'
WEBSTORAGE_KEY = 'WEBSTORAGE_KEY'
WEB_PAYLOAD = 'WEB_PAYLOAD'
WHOIS_NAME = 'WHOIS_NAME'
WHOIS_ADDR1 = 'WHOIS_ADDR1'
WHOIS_ADDR2 = 'WHOIS_ADDR2'
XPI = 'XPI'
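# Illustrative sketch (not part of this module) of how these vocabulary classes
# are typically combined into request parameters; the access token below is a
# placeholder:
#
#   from pytx.vocabulary import ThreatExchange, Types
#   params = {
#       ThreatExchange.ACCESS_TOKEN: '<app_id>|<app_secret>',
#       ThreatExchange.TYPE: Types.IP_ADDRESS,
#       ThreatExchange.LIMIT: ThreatExchange.DEFAULT_LIMIT,
#   }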
|
RyPeck/ThreatExchange
|
pytx/pytx/vocabulary.py
|
Python
|
bsd-3-clause
| 14,656
|
[
"Amber"
] |
13a4380e76b15824bd6a61852b76f2fa4505324b8f4418d3f543a41363257769
|
"""socket interactions for gadfly client and server"""
from select import select
# responses
SUCCESS = "SUCCESS"
EXCEPTION = "EXCEPTION"
def reply_exception(exception, info, socket):
"""send an exception back to the client"""
# any error is invisible to client
from gfserve import ServerError
import sys
try:
reply( (EXCEPTION, (exception, info)), socket)
except:
#info = "%s %s" % (sys.exc_type, sys.exc_value)
socket.close()
#raise ServerError, "reply_exception failed: "+`info`
def reply_success(data, socket):
"""report success with data back to client"""
reply( (SUCCESS, data), socket)
def reply(data, socket):
from marshal import dumps
marshaldata = dumps(data)
send_packet(socket, marshaldata)
socket.close()
def send_packet(socket, data):
"""blast out a length marked packet"""
send_len(data, socket)
socket.send(data)
def send_len(data, socket):
"""send length of data as cr terminated int rep"""
info = `len(data)`+"\n"
socket.send(info)
def send_certified_action(actor_name, action, arguments, password, socket):
from marshal import dumps
marshaldata = dumps( (action, arguments) )
cert = certificate(marshaldata, password)
#print actor_name, cert, marshaldata
marshaldata = dumps( (actor_name, cert, marshaldata) )
send_packet(socket, marshaldata)
def unpack_certified_data(data):
from marshal import loads
# sanity check
unpack = (actor_name, certificate, marshaldata) = loads(data)
return unpack
def recv_data(socket, timeout=10):
"""receive data or time out"""
from time import time
endtime = time() + timeout
reader = Packet_Reader(socket)
done = 0
while not done:
timeout = endtime - time()
if timeout<0:
raise IOError, "socket time out (1)"
(readable, dummy, error) = select([socket], [], [socket], timeout)
if error:
raise IOError, "socket in error state"
if not readable:
raise IOError, "socket time out (2)"
reader.poll()
done = (reader.mode==READY)
return reader.data
def interpret_response(data):
"""interpret response data, raise exception if needed"""
from marshal import loads
(indicator, data) = loads(data)
if indicator==SUCCESS:
return data
elif indicator==EXCEPTION:
# ???
raise EXCEPTION, data
else:
raise ValueError, "unknown indicator: "+`indicator`
# packet reader modes
LEN = "LEN"
DATA = "DATA"
READY = "READY"
ERROR = "ERROR"
BLOCK_SIZE = 4028
LEN_LIMIT = BLOCK_SIZE * 10
class Packet_Reader:
"""nonblocking pseudo-packet reader."""
    # packets come in as decimal_len\ndata
    # (note: terminated by a single newline, not crlf)
# kick too large requests if set
limit_len = LEN_LIMIT
def __init__(self, socket):
self.socket = socket
self.length = None
self.length_remaining = None
self.len_list = []
self.data_list = []
self.received = ""
self.data = None
self.mode = LEN
def __len__(self):
if self.mode is LEN:
raise ValueError, "still reading length"
return self.length
def get_data(self):
if self.mode is not READY:
raise ValueError, "still reading"
return self.data
def poll(self):
mode = self.mode
if mode is READY:
raise ValueError, "data is ready"
if mode is ERROR:
raise ValueError, "socket error previously detected"
socket = self.socket
(readable, dummy, error) = select([socket], [], [socket], 0)
if error:
self.socket.close()
self.mode = ERROR
raise ValueError, "socket is in error state"
if readable:
if mode is LEN:
self.read_len()
# note: do not fall thru automatically
elif mode is DATA:
self.read_data()
def read_len(self):
"""assume socket is readable now, read length"""
socket = self.socket
received = self.received
len_list = self.len_list
if not received:
# 10 bytes at a time until len is read.
received = socket.recv(10)
while received:
# consume, test one char
input = received[0]
received = received[1:]
if input == "\n":
# done reading length
from string import join, atoi
try:
length = self.length = atoi(join(len_list, ""))
except:
self.mode = ERROR
socket.close()
raise ValueError, "bad len string? "+`len_list`
self.received = received
self.length_remaining = length
self.mode = DATA
limit_len = self.limit_len
if limit_len and length>limit_len:
raise ValueError, "Length too big: "+`(length, limit_len)`
return
if len(len_list)>10:
self.mode = ERROR
socket.close()
raise ValueError, "len_list too long: "+`len_list`
len_list.append(input)
if not received:
(readable, dummy, error) = select(\
[socket], [], [socket], 0)
if error:
self.mode = ERROR
socket.close()
raise ValueError, "socket in error state"
if readable:
received = socket.recv(10)
# remember extra data received.
self.received = received
def read_data(self):
# assume socket is readable
socket = self.socket
received = self.received
length_remaining = self.length_remaining
data_list = self.data_list
if received:
data_list.append(received)
self.received = ""
length_remaining = length_remaining - len(received)
recv_len = max(length_remaining, BLOCK_SIZE)
received = socket.recv(recv_len)
if received:
data_list.append(received)
length_remaining = length_remaining - len(received)
if length_remaining<1:
self.mode = READY
from string import join
self.data = join(data_list, "")
self.length_remaining = length_remaining
def certificate(String, password):
"""generate a certificate for a string, using a password"""
from md5 import new
if not String:
raise ValueError, "cannot generate certificate for empty string"
taggedstring = password + String
return new(taggedstring).digest()
def certify(String, cert, password):
"""check a certificate for a string"""
return certificate(String, password) == cert
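# Wire-format sketch implied by send_len/send_packet above: every packet is the
# decimal byte length of the payload, a newline, then the marshalled payload,
# e.g. (the length shown is illustrative):
#
#   "29\n" + marshal.dumps((SUCCESS, data))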
|
Donkyhotay/MoonPy
|
zope/rdb/gadfly/gfsocket.py
|
Python
|
gpl-3.0
| 6,946
|
[
"BLAST"
] |
8db6e748e79f9b198680204add0c35461709797d1e177ce5f907cf655fd997a6
|
# Copyright (C) 2020 Atsushi Togo
# All rights reserved.
#
# This file is part of phono3py.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import warnings
import numpy as np
from phonopy.harmonic.dynamical_matrix import get_dynamical_matrix
from phonopy.units import VaspToTHz, Hbar, EV, Angstrom, THz, AMU
from phono3py.phonon.solver import run_phonon_solver_c, run_phonon_solver_py
from phono3py.phonon3.real_to_reciprocal import RealToReciprocal
from phono3py.phonon3.reciprocal_to_normal import ReciprocalToNormal
from phono3py.phonon3.triplets import (get_triplets_at_q,
get_nosym_triplets_at_q,
get_bz_grid_address)
class Interaction(object):
def __init__(self,
supercell,
primitive,
mesh,
symmetry,
fc3=None,
band_indices=None,
constant_averaged_interaction=None,
frequency_factor_to_THz=VaspToTHz,
frequency_scale_factor=None,
unit_conversion=None,
is_mesh_symmetry=True,
symmetrize_fc3q=False,
cutoff_frequency=None,
lapack_zheev_uplo='L'):
self._supercell = supercell
self._primitive = primitive
self._mesh = np.array(mesh, dtype='intc')
self._symmetry = symmetry
self._band_indices = None
self._set_band_indices(band_indices)
self._constant_averaged_interaction = constant_averaged_interaction
self._frequency_factor_to_THz = frequency_factor_to_THz
self._frequency_scale_factor = frequency_scale_factor
self._set_fc3(fc3)
# Unit to eV^2
if unit_conversion is None:
num_grid = np.prod(self._mesh)
self._unit_conversion = ((Hbar * EV) ** 3 / 36 / 8
* EV ** 2 / Angstrom ** 6
/ (2 * np.pi * THz) ** 3
/ AMU ** 3 / num_grid
/ EV ** 2)
else:
self._unit_conversion = unit_conversion
if cutoff_frequency is None:
self._cutoff_frequency = 0
else:
self._cutoff_frequency = cutoff_frequency
self._is_mesh_symmetry = is_mesh_symmetry
self._symmetrize_fc3q = symmetrize_fc3q
self._lapack_zheev_uplo = lapack_zheev_uplo
self._symprec = symmetry.get_symmetry_tolerance()
self._grid_point = None
self._triplets_at_q = None
self._weights_at_q = None
self._triplets_map_at_q = None
self._ir_map_at_q = None
self._grid_address = None
self._bz_map = None
self._interaction_strength = None
self._g_zero = None
self._phonon_done = None
self._frequencies = None
self._eigenvectors = None
self._dm = None
self._nac_params = None
self._nac_q_direction = None
self._band_index_count = 0
svecs, multiplicity = self._primitive.get_smallest_vectors()
self._smallest_vectors = svecs
self._multiplicity = multiplicity
self._masses = np.array(self._primitive.masses, dtype='double')
self._p2s = self._primitive.p2s_map
self._s2p = self._primitive.s2p_map
self._allocate_phonon()
def run(self, lang='C', g_zero=None):
num_band = len(self._primitive) * 3
num_triplets = len(self._triplets_at_q)
self._interaction_strength = np.empty(
(num_triplets, len(self._band_indices), num_band, num_band),
dtype='double')
if self._constant_averaged_interaction is None:
self._interaction_strength[:] = 0
if lang == 'C':
self._run_c(g_zero)
else:
self._run_py()
else:
num_grid = np.prod(self._mesh)
self._interaction_strength[:] = (
self._constant_averaged_interaction / num_grid)
@property
def interaction_strength(self):
return self._interaction_strength
def get_interaction_strength(self):
warnings.warn("Use attribute, interaction_strength.",
DeprecationWarning)
return self.interaction_strength
@property
def mesh_numbers(self):
return self._mesh
def get_mesh_numbers(self):
warnings.warn("Use attribute, mesh_numbers.", DeprecationWarning)
return self.mesh_numbers
@property
def is_mesh_symmetry(self):
return self._is_mesh_symmetry
@property
def fc3(self):
return self._fc3
def get_fc3(self):
warnings.warn("Use attribute, fc3.", DeprecationWarning)
return self.fc3
@property
def dynamical_matrix(self):
return self._dm
def get_dynamical_matrix(self):
warnings.warn("Use attribute, dynamical_matrix.", DeprecationWarning)
return self.dynamical_matrix
@property
def primitive(self):
return self._primitive
def get_primitive(self):
warnings.warn("Use attribute, primitive.", DeprecationWarning)
return self.primitive
@property
def supercell(self):
return self._supercell
def get_supercell(self):
warnings.warn("Use attribute, supercell.", DeprecationWarning)
return self.supercell
def get_triplets_at_q(self):
return (self._triplets_at_q,
self._weights_at_q,
self._triplets_map_at_q,
self._ir_map_at_q)
@property
def grid_address(self):
return self._grid_address
def get_grid_address(self):
warnings.warn("Use attribute, grid_address.", DeprecationWarning)
return self.grid_address
@property
def bz_map(self):
return self._bz_map
def get_bz_map(self):
warnings.warn("Use attribute, bz_map.", DeprecationWarning)
return self.bz_map
@property
def band_indices(self):
return self._band_indices
def get_band_indices(self):
warnings.warn("Use attribute, band_indices.", DeprecationWarning)
return self.band_indices
@property
def nac_params(self):
return self._nac_params
@property
def nac_q_direction(self):
return self._nac_q_direction
def get_nac_q_direction(self):
warnings.warn("Use attribute, nac_q_direction.", DeprecationWarning)
return self.nac_q_direction
@property
def zero_value_positions(self):
return self._g_zero
def get_zero_value_positions(self):
warnings.warn("Use attribute, zero_value_positions.",
DeprecationWarning)
return self.zero_value_positions
def get_phonons(self):
return self._frequencies, self._eigenvectors, self._phonon_done
@property
def frequency_factor_to_THz(self):
return self._frequency_factor_to_THz
def get_frequency_factor_to_THz(self):
warnings.warn("Use attribute, frequency_factor_to_THz.",
DeprecationWarning)
return self.frequency_factor_to_THz
@property
def lapack_zheev_uplo(self):
return self._lapack_zheev_uplo
def get_lapack_zheev_uplo(self):
warnings.warn("Use attribute, lapack_zheev_uplo.", DeprecationWarning)
return self.lapack_zheev_uplo
@property
def cutoff_frequency(self):
return self._cutoff_frequency
def get_cutoff_frequency(self):
warnings.warn("Use attribute, cutoff_frequency.", DeprecationWarning)
return self.cutoff_frequency
def get_averaged_interaction(self):
"""Return sum over phonon triplets of interaction strength
See Eq.(21) of PRB 91, 094306 (2015)
"""
# v[triplet, band0, band, band]
v = self._interaction_strength
w = self._weights_at_q
v_sum = np.dot(w, v.sum(axis=2).sum(axis=2))
return v_sum / np.prod(v.shape[2:])
def get_primitive_and_supercell_correspondence(self):
return (self._smallest_vectors,
self._multiplicity,
self._p2s,
self._s2p,
self._masses)
def get_unit_conversion_factor(self):
return self._unit_conversion
def get_constant_averaged_interaction(self):
return self._constant_averaged_interaction
def set_interaction_strength(self, pp_strength, g_zero=None):
self._interaction_strength = pp_strength
self._g_zero = g_zero
def set_grid_point(self, grid_point, stores_triplets_map=False):
reciprocal_lattice = np.linalg.inv(self._primitive.cell)
if not self._is_mesh_symmetry:
(triplets_at_q,
weights_at_q,
grid_address,
bz_map,
triplets_map_at_q,
ir_map_at_q) = get_nosym_triplets_at_q(
grid_point,
self._mesh,
reciprocal_lattice,
stores_triplets_map=stores_triplets_map)
else:
(triplets_at_q,
weights_at_q,
grid_address,
bz_map,
triplets_map_at_q,
ir_map_at_q) = get_triplets_at_q(
grid_point,
self._mesh,
self._symmetry.get_pointgroup_operations(),
reciprocal_lattice,
stores_triplets_map=stores_triplets_map)
# Special treatment of symmetry is applied when q_direction is used.
if self._nac_q_direction is not None:
if (grid_address[grid_point] == 0).all():
self._phonon_done[grid_point] = 0
self.run_phonon_solver(np.array([grid_point], dtype='uintp'))
rotations = []
for r in self._symmetry.get_pointgroup_operations():
dq = self._nac_q_direction
dq /= np.linalg.norm(dq)
diff = np.dot(dq, r) - dq
if (abs(diff) < 1e-5).all():
rotations.append(r)
(triplets_at_q,
weights_at_q,
grid_address,
bz_map,
triplets_map_at_q,
ir_map_at_q) = get_triplets_at_q(
grid_point,
self._mesh,
np.array(rotations, dtype='intc', order='C'),
reciprocal_lattice,
is_time_reversal=False,
stores_triplets_map=stores_triplets_map)
for triplet in triplets_at_q:
sum_q = (grid_address[triplet]).sum(axis=0)
if (sum_q % self._mesh != 0).any():
print("============= Warning ==================")
print("%s" % triplet)
for tp in triplet:
print("%s %s" %
(grid_address[tp],
np.linalg.norm(
np.dot(reciprocal_lattice,
grid_address[tp] /
self._mesh.astype('double')))))
print("%s" % sum_q)
print("============= Warning ==================")
self._grid_point = grid_point
self._triplets_at_q = triplets_at_q
self._weights_at_q = weights_at_q
self._triplets_map_at_q = triplets_map_at_q
# self._grid_address = grid_address
# self._bz_map = bz_map
self._ir_map_at_q = ir_map_at_q
def init_dynamical_matrix(self,
fc2,
supercell,
primitive,
nac_params=None,
solve_dynamical_matrices=True,
decimals=None,
verbose=False):
self._nac_params = nac_params
self._dm = get_dynamical_matrix(
fc2,
supercell,
primitive,
nac_params=nac_params,
frequency_scale_factor=self._frequency_scale_factor,
decimals=decimals,
symprec=self._symprec)
if solve_dynamical_matrices:
self.run_phonon_solver(verbose=verbose)
else:
self.run_phonon_solver(np.array([0], dtype='uintp'),
verbose=verbose)
if (self._grid_address[0] == 0).all():
if np.sum(self._frequencies[0] < self._cutoff_frequency) < 3:
for i, f in enumerate(self._frequencies[0, :3]):
if not (f < self._cutoff_frequency):
self._frequencies[0, i] = 0
print("=" * 26 + " Warning " + "=" * 26)
print(" Phonon frequency of band index %d at Gamma "
"is calculated to be %f." % (i + 1, f))
print(" But this frequency is forced to be zero.")
print("=" * 61)
def set_nac_q_direction(self, nac_q_direction=None):
if nac_q_direction is not None:
self._nac_q_direction = np.array(nac_q_direction, dtype='double')
def set_phonon_data(self, frequencies, eigenvectors, grid_address):
if grid_address.shape != self._grid_address.shape:
raise RuntimeError("Input grid address size is inconsistent. "
"Setting phonons faild.")
        if (self._grid_address - grid_address).any():
raise RuntimeError("Input grid addresses are inconsistent. "
"Setting phonons faild.")
else:
self._phonon_done[:] = 1
self._frequencies[:] = frequencies
self._eigenvectors[:] = eigenvectors
def set_phonons(self, grid_points=None, verbose=False):
msg = ("Interaction.set_phonons is deprecated at v2.0. "
"Use Interaction.run_phonon_solver intead.")
warnings.warn(msg, DeprecationWarning)
self.run_phonon_solver(grid_points=grid_points, verbose=verbose)
def run_phonon_solver(self, grid_points=None, verbose=False):
if grid_points is None:
_grid_points = np.arange(len(self._grid_address), dtype='uintp')
else:
_grid_points = grid_points
self._run_phonon_solver_c(_grid_points, verbose=verbose)
def delete_interaction_strength(self):
self._interaction_strength = None
self._g_zero = None
def _set_fc3(self, fc3):
        if fc3 is None and self._constant_averaged_interaction is None:
msg = ("fc3 can not be None unless constant_averaged_interaction "
"is given.")
raise AttributeError(msg)
if (type(fc3) == np.ndarray and
fc3.dtype == np.dtype('double') and
fc3.flags.aligned and
fc3.flags.owndata and
fc3.flags.c_contiguous and
self._frequency_scale_factor is None):
self._fc3 = fc3
elif self._frequency_scale_factor is None:
self._fc3 = np.array(fc3, dtype='double', order='C')
else:
self._fc3 = np.array(fc3 * self._frequency_scale_factor ** 2,
dtype='double', order='C')
def _set_band_indices(self, band_indices):
num_band = len(self._primitive) * 3
if band_indices is None:
self._band_indices = np.arange(num_band, dtype='intc')
else:
self._band_indices = np.array(band_indices, dtype='intc')
def _run_c(self, g_zero):
import phono3py._phono3py as phono3c
if g_zero is None or self._symmetrize_fc3q:
_g_zero = np.zeros(self._interaction_strength.shape,
dtype='byte', order='C')
else:
_g_zero = g_zero
phono3c.interaction(self._interaction_strength,
_g_zero,
self._frequencies,
self._eigenvectors,
self._triplets_at_q,
self._grid_address,
self._mesh,
self._fc3,
self._smallest_vectors,
self._multiplicity,
self._masses,
self._p2s,
self._s2p,
self._band_indices,
self._symmetrize_fc3q,
self._cutoff_frequency)
self._interaction_strength *= self._unit_conversion
self._g_zero = g_zero
def _run_phonon_solver_c(self, grid_points, verbose=False):
run_phonon_solver_c(self._dm,
self._frequencies,
self._eigenvectors,
self._phonon_done,
grid_points,
self._grid_address,
self._mesh,
self._frequency_factor_to_THz,
self._nac_q_direction,
self._lapack_zheev_uplo,
verbose=verbose)
def _run_py(self):
r2r = RealToReciprocal(self._fc3,
self._supercell,
self._primitive,
self._mesh,
symprec=self._symprec)
r2n = ReciprocalToNormal(self._primitive,
self._frequencies,
self._eigenvectors,
self._band_indices,
cutoff_frequency=self._cutoff_frequency)
for i, grid_triplet in enumerate(self._triplets_at_q):
print("%d / %d" % (i + 1, len(self._triplets_at_q)))
r2r.run(self._grid_address[grid_triplet])
fc3_reciprocal = r2r.get_fc3_reciprocal()
for gp in grid_triplet:
self._run_phonon_solver_py(gp)
r2n.run(fc3_reciprocal, grid_triplet)
self._interaction_strength[i] = np.abs(
r2n.get_reciprocal_to_normal()) ** 2 * self._unit_conversion
def _run_phonon_solver_py(self, grid_point):
run_phonon_solver_py(grid_point,
self._phonon_done,
self._frequencies,
self._eigenvectors,
self._grid_address,
self._mesh,
self._dm,
self._frequency_factor_to_THz,
self._lapack_zheev_uplo)
def _allocate_phonon(self):
primitive_lattice = np.linalg.inv(self._primitive.cell)
self._grid_address, self._bz_map = get_bz_grid_address(
self._mesh, primitive_lattice, with_boundary=True)
num_band = len(self._primitive) * 3
num_grid = len(self._grid_address)
self._phonon_done = np.zeros(num_grid, dtype='byte')
self._frequencies = np.zeros((num_grid, num_band), dtype='double')
itemsize = self._frequencies.itemsize
self._eigenvectors = np.zeros((num_grid, num_band, num_band),
dtype=("c%d" % (itemsize * 2)))
|
atztogo/phono3py
|
phono3py/phonon3/interaction.py
|
Python
|
bsd-3-clause
| 21,035
|
[
"phonopy"
] |
a1cd1cac0b75a522649744c1418942fc751053f96eaf6d93888b766e5dc90727
|
##
# Copyright 2009-2018 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing WRF-Fire, implemented as an easyblock
author: Kenneth Hoste (HPC-UGent)
"""
import os
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM, MANDATORY
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import apply_regex_substitutions, change_dir, patch_perl_script_autoflush
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd, run_cmd_qa
class EB_WRF_minus_Fire(EasyBlock):
"""Support for building/installing WRF-Fire."""
@staticmethod
def extra_options():
"""Custom easyconfig parameters for WRF-Fire."""
extra_vars = {
'buildtype': [None, "Specify the type of build (serial, smpar (OpenMP), " \
"dmpar (MPI), dm+sm (hybrid OpenMP/MPI)).", MANDATORY],
'runtest': [True, "Build and run WRF tests", CUSTOM],
}
return EasyBlock.extra_options(extra_vars)
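    # Illustrative values for the custom parameters defined above, as they
    # would appear in an easyconfig file (example values only):
    #
    #   buildtype = 'dmpar'
    #   runtest = True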
def __init__(self, *args, **kwargs):
"""Add extra config options specific to WRF."""
super(EB_WRF_minus_Fire, self).__init__(*args, **kwargs)
self.build_in_installdir = True
def extract_step(self):
"""Extract WRF-Fire sources."""
self.cfg.update('unpack_options', '--strip-components=1')
super(EB_WRF_minus_Fire, self).extract_step()
def configure_step(self):
"""Custom configuration procedure for WRF-Fire."""
comp_fam = self.toolchain.comp_family()
# define $NETCDF* for netCDF dependency
netcdf_fortran = get_software_root('netCDF-Fortran')
if netcdf_fortran:
env.setvar('NETCDF', netcdf_fortran)
else:
raise EasyBuildError("Required dependendy netCDF-Fortran is missing")
# define $PHDF5 for parallel HDF5 dependency
hdf5 = get_software_root('HDF5')
if hdf5 and os.path.exists(os.path.join(hdf5, 'bin', 'h5pcc')):
env.setvar('PHDF5', hdf5)
# first, configure WRF part
change_dir(os.path.join(self.cfg['start_dir'], 'WRFV3'))
# instruct WRF-Fire to create netCDF v4 output files
env.setvar('WRFIO_NETCDF4_FILE_SUPPORT', '1')
# patch arch/Config_new.pl script, so that run_cmd_qa receives all output to answer questions
patch_perl_script_autoflush(os.path.join('arch', 'Config_new.pl'))
# determine build type option to look for
known_build_type_options = {
toolchain.INTELCOMP: "Linux x86_64 i486 i586 i686, ifort compiler with icc",
toolchain.GCC: "x86_64 Linux, gfortran compiler with gcc",
toolchain.PGI: "Linux x86_64, PGI compiler with pgcc",
}
build_type_option = known_build_type_options.get(comp_fam)
if build_type_option is None:
raise EasyBuildError("Don't know which WPS configure option to select for compiler family %s", comp_fam)
build_type_question = "\s*(?P<nr>[0-9]+).\s*%s\s*\(%s\)" % (build_type_option, self.cfg['buildtype'])
qa = {
"Compile for nesting? (1=basic, 2=preset moves, 3=vortex following) [default 1]:": '1',
}
std_qa = {
# named group in match will be used to construct answer
r"%s.*\n(.*\n)*Enter selection\s*\[[0-9]+-[0-9]+\]\s*:" % build_type_question: '%(nr)s',
}
run_cmd_qa('./configure', qa, std_qa=std_qa, log_all=True, simple=True)
cpp_flag = None
if comp_fam == toolchain.INTELCOMP:
cpp_flag = '-fpp'
elif comp_fam == toolchain.GCC:
cpp_flag = '-cpp'
else:
raise EasyBuildError("Don't know which flag to use to specify that Fortran files were preprocessed")
# patch configure.wrf to get things right
comps = {
'CFLAGS_LOCAL': os.getenv('CFLAGS'),
'DM_FC': os.getenv('MPIF90'),
'DM_CC': "%s -DMPI2_SUPPORT" % os.getenv('MPICC'),
'FCOPTIM': os.getenv('FFLAGS'),
# specify that Fortran files have been preprocessed with cpp,
# see http://forum.wrfforum.com/viewtopic.php?f=5&t=6086
'FORMAT_FIXED': "-FI %s" % cpp_flag,
'FORMAT_FREE': "-FR %s" % cpp_flag,
}
regex_subs = [(r"^(%s\s*=\s*).*$" % k, r"\1 %s" % v) for (k, v) in comps.items()]
apply_regex_substitutions('configure.wrf', regex_subs)
# also configure WPS part
change_dir(os.path.join(self.cfg['start_dir'], 'WPS'))
# patch arch/Config_new.pl script, so that run_cmd_qa receives all output to answer questions
patch_perl_script_autoflush(os.path.join('arch', 'Config.pl'))
# determine build type option to look for
known_build_type_options = {
toolchain.INTELCOMP: "PC Linux x86_64, Intel compiler",
toolchain.GCC: "PC Linux x86_64, g95 compiler",
toolchain.PGI: "PC Linux x86_64 (IA64 and Opteron), PGI compiler 5.2 or higher",
}
build_type_option = known_build_type_options.get(comp_fam)
if build_type_option is None:
raise EasyBuildError("Don't know which WPS configure option to select for compiler family %s", comp_fam)
known_wps_build_types = {
'dmpar': 'DM parallel',
'smpar': 'serial',
}
wps_build_type = known_wps_build_types.get(self.cfg['buildtype'])
if wps_build_type is None:
raise EasyBuildError("Don't know which WPS build type to pick for '%s'", self.cfg['builddtype'])
build_type_question = "\s*(?P<nr>[0-9]+).\s*%s.*%s(?!NO GRIB2)" % (build_type_option, wps_build_type)
std_qa = {
# named group in match will be used to construct answer
r"%s.*\n(.*\n)*Enter selection\s*\[[0-9]+-[0-9]+\]\s*:" % build_type_question: '%(nr)s',
}
run_cmd_qa('./configure', {}, std_qa=std_qa, log_all=True, simple=True)
# patch configure.wps to get things right
comps = {
'CC': '%s %s' % (os.getenv('MPICC'), os.getenv('CFLAGS')),
'FC': '%s %s' % (os.getenv('MPIF90'), os.getenv('F90FLAGS'))
}
regex_subs = [(r"^(%s\s*=\s*).*$" % k, r"\1 %s" % v) for (k, v) in comps.items()]
# specify that Fortran90 files have been preprocessed with cpp
regex_subs.extend([
(r"^(F77FLAGS\s*=\s*)", r"\1 %s " % cpp_flag),
(r"^(FFLAGS\s*=\s*)", r"\1 %s " % cpp_flag),
])
apply_regex_substitutions('configure.wps', regex_subs)
def build_step(self):
"""Custom build procedure for WRF-Fire."""
cmd = './compile'
if self.cfg['parallel']:
cmd += " -j %d" % self.cfg['parallel']
# first, build WRF part
change_dir(os.path.join(self.cfg['start_dir'], 'WRFV3'))
(out, ec) = run_cmd(cmd + ' em_fire', log_all=True, simple=False, log_ok=True)
# next, build WPS part
change_dir(os.path.join(self.cfg['start_dir'], 'WPS'))
(out, ec) = run_cmd('./compile', log_all=True, simple=False, log_ok=True)
def test_step(self):
"""Custom built-in test procedure for WRF-Fire."""
if self.cfg['runtest']:
change_dir(os.path.join(self.cfg['start_dir'], 'WRFV3', 'test', 'em_fire', 'hill'))
if self.cfg['buildtype'] in ['dmpar', 'smpar', 'dm+sm']:
test_cmd = "ulimit -s unlimited && %s && %s" % (self.toolchain.mpi_cmd_for("./ideal.exe", 1),
self.toolchain.mpi_cmd_for("./wrf.exe", 2))
else:
test_cmd = "ulimit -s unlimited && ./ideal.exe && ./wrf.exe"
run_cmd(test_cmd, simple=True, log_all=True, log_ok=True)
# building/installing is done in build_step, so we can run tests
def install_step(self):
"""Building was done in install dir, so nothing to do in install_step."""
pass
def sanity_check_step(self):
"""Custom sanity check for WRF-Fire."""
custom_paths = {
'files': [os.path.join('WRFV3', 'main', f) for f in ['ideal.exe', 'libwrflib.a', 'wrf.exe']] +
[os.path.join('WPS', f) for f in ['geogrid.exe', 'metgrid.exe', 'ungrib.exe']],
'dirs': [os.path.join('WRFV3', d) for d in ['main', 'run']],
}
super(EB_WRF_minus_Fire, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""Custom guesses for generated WRF-Fire module file."""
wrf_maindir = os.path.join('WRFV3', 'main')
return {
'LD_LIBRARY_PATH': [wrf_maindir],
'PATH': [wrf_maindir, 'WPS'],
}
def make_module_extra(self):
"""Add netCDF environment variables to module file."""
txt = super(EB_WRF_minus_Fire, self).make_module_extra()
netcdf_fortran = get_software_root('netCDF-Fortran')
if netcdf_fortran:
txt += self.module_generator.set_environment('NETCDF', netcdf_fortran)
return txt
|
bartoldeman/easybuild-easyblocks
|
easybuild/easyblocks/w/wrf_fire.py
|
Python
|
gpl-2.0
| 10,206
|
[
"NetCDF"
] |
7b55cabda3ffac5f15d5d38de7b7ff33eb3a58a75987442be8d697a7eb395688
|
#!/usr/bin/python
#
# This source file is part of appleseed.
# Visit https://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2017-2018 Esteban Tovagliari, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import print_function
from distutils import archive_util, dir_util
from xml.etree.ElementTree import ElementTree
import argparse
import colorama
import datetime
import glob
import os
import platform
import re
import shutil
import stat
import subprocess
import sys
import time
import traceback
import urllib
#--------------------------------------------------------------------------------------------------
# Constants.
#--------------------------------------------------------------------------------------------------
VERSION = "1.1.1"
SETTINGS_FILENAME = "blenderseed.package.configuration.xml"
#--------------------------------------------------------------------------------------------------
# Utility functions.
#--------------------------------------------------------------------------------------------------
GREEN_CHECKMARK = u"{0}\u2713{1}".format(colorama.Style.BRIGHT + colorama.Fore.GREEN, colorama.Style.RESET_ALL)
RED_CROSSMARK = u"{0}\u2717{1}".format(colorama.Style.BRIGHT + colorama.Fore.RED, colorama.Style.RESET_ALL)
def trace(message):
# encode('utf-8') is required to support output redirection to files or pipes.
print(u" {0}{1}{2}".format(colorama.Style.DIM + colorama.Fore.WHITE, message, colorama.Style.RESET_ALL).encode('utf-8'))
def info(message):
print(u" {0}".format(message).encode('utf-8'))
def progress(message):
print(u" {0}...".format(message).encode('utf-8'))
def warning(message):
print(u" {0}Warning: {1}.{2}".format(colorama.Style.BRIGHT + colorama.Fore.MAGENTA, message, colorama.Style.RESET_ALL).encode('utf-8'))
def fatal(message):
print(u"{0}Fatal: {1}. Aborting.{2}".format(colorama.Style.BRIGHT + colorama.Fore.RED, message, colorama.Style.RESET_ALL).encode('utf-8'))
if sys.exc_info()[0]:
print(traceback.format_exc())
sys.exit(1)
def exe(filepath):
return filepath + ".exe" if os.name == "nt" else filepath
def safe_delete_file(path):
try:
if os.path.exists(path):
os.remove(path)
except OSError:
fatal("Failed to delete file '" + path + "'")
def on_rmtree_error(func, path, exc_info):
# path contains the path of the file that couldn't be removed.
# Let's just assume that it's read-only and unlink it.
os.chmod(path, stat.S_IWRITE)
os.unlink(path)
def safe_delete_directory(path):
Attempts = 10
for attempt in range(Attempts):
try:
if os.path.exists(path):
shutil.rmtree(path, onerror=on_rmtree_error)
return
except OSError:
if attempt < Attempts - 1:
time.sleep(0.5)
else:
fatal("Failed to delete directory '" + path + "'")
def safe_delete_directory_recursively(root_path, directory_name):
safe_delete_directory(os.path.join(root_path, directory_name))
for entry in os.listdir(root_path):
subdirectory = os.path.join(root_path, entry)
if os.path.isdir(subdirectory):
safe_delete_directory_recursively(subdirectory, directory_name)
def safe_make_directory(path):
if not os.path.isdir(path):
os.makedirs(path)
def pushd(path):
old_path = os.getcwd()
os.chdir(path)
return old_path
def copy_glob(input_pattern, output_path):
for input_file in glob.glob(input_pattern):
shutil.copy(input_file, output_path)
#--------------------------------------------------------------------------------------------------
# Settings.
#--------------------------------------------------------------------------------------------------
class Settings:
def load(self):
self.this_dir = os.path.dirname(os.path.realpath(__file__))
self.root_dir = os.path.join(self.this_dir, "..")
print("Loading settings from " + SETTINGS_FILENAME + "...")
tree = ElementTree()
try:
tree.parse(SETTINGS_FILENAME)
except IOError:
fatal("Failed to load configuration file '" + SETTINGS_FILENAME + "'")
self.__load_values(tree)
def print_summary(self):
print("")
print(" Platform: " + self.platform)
print(" Path to appleseed release: " + self.appleseed_release_path)
print(" Path to appleseed binaries: " + self.appleseed_bin_path)
print(" Path to appleseed libraries: " + self.appleseed_lib_path)
print(" Path to appleseed shaders: " + self.appleseed_shaders_path)
print(" Path to appleseed schemas: " + self.appleseed_schemas_path)
print(" Path to appleseed.python: " + self.appleseed_python_path)
print(" Output directory: " + self.output_dir)
print("")
def __load_values(self, tree):
self.platform = self.__get_required(tree, "platform")
self.appleseed_release_path = self.__get_required(tree, "appleseed_release_path")
os.environ['APPLESEED'] = self.appleseed_release_path
self.appleseed_bin_path = os.path.expandvars(self.__get_required(tree, "appleseed_bin_path"))
self.appleseed_lib_path = os.path.expandvars(self.__get_required(tree, "appleseed_lib_path"))
self.appleseed_shaders_path = os.path.expandvars(self.__get_required(tree, "appleseed_shaders_path"))
self.appleseed_schemas_path = os.path.expandvars(self.__get_required(tree, "appleseed_schemas_path"))
self.appleseed_python_path = os.path.expandvars(self.__get_required(tree, "appleseed_python_path"))
self.output_dir = os.path.expandvars(self.__get_required(tree, "output_dir"))
def __get_required(self, tree, key):
value = tree.findtext(key)
if value is None:
fatal("Missing value \"{0}\" in configuration file".format(key))
return value
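# Sketch of the configuration file structure expected by __load_values above;
# the element names come from the keys queried there, while the root tag and
# all values are placeholders:
#
#   <package>
#     <platform>linux</platform>
#     <appleseed_release_path>/path/to/appleseed</appleseed_release_path>
#     <appleseed_bin_path>${APPLESEED}/bin</appleseed_bin_path>
#     <appleseed_lib_path>${APPLESEED}/lib</appleseed_lib_path>
#     <appleseed_shaders_path>${APPLESEED}/shaders</appleseed_shaders_path>
#     <appleseed_schemas_path>${APPLESEED}/schemas</appleseed_schemas_path>
#     <appleseed_python_path>${APPLESEED}/lib/python2.7</appleseed_python_path>
#     <output_dir>./output</output_dir>
#   </package>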
#--------------------------------------------------------------------------------------------------
# Base package builder.
#--------------------------------------------------------------------------------------------------
class PackageBuilder(object):
def __init__(self, settings, package_version, build_date, no_release=False):
self.settings = settings
self.package_version = package_version
self.build_date = build_date
self.no_release = no_release
def build_package(self):
print("Building package:")
print("")
self.orchestrate()
print("")
print("The package was successfully built.")
def orchestrate(self):
self.remove_leftovers()
self.copy_appleseed_python()
self.copy_binaries()
self.copy_schemas()
self.copy_shaders()
self.download_settings_files()
self.remove_pyc_files()
self.copy_dependencies()
self.post_process_package()
if not self.no_release:
self.deploy_blenderseed_to_stage()
self.clean_stage()
self.build_final_zip_file()
self.remove_stage()
def remove_leftovers(self):
progress("Removing leftovers from previous invocations")
safe_delete_directory(os.path.join(self.settings.root_dir, "appleseed"))
safe_delete_directory("blenderseed")
def copy_appleseed_python(self):
progress("Copying appleseed.python to root directory")
# Create destination directory.
lib_dir = os.path.join(self.settings.root_dir, "appleseed", "lib")
safe_make_directory(lib_dir)
# Copy appleseed.python.
dir_util.copy_tree(self.settings.appleseed_python_path, lib_dir)
def copy_binaries(self):
progress("Copying binaries to root directory")
# Create destination directory.
bin_dir = os.path.join(self.settings.root_dir, "appleseed", "bin")
safe_make_directory(bin_dir)
# Copy appleseed binaries.
for bin in [exe("appleseed.cli")]:
shutil.copy(os.path.join(self.settings.appleseed_bin_path, bin), bin_dir)
def copy_schemas(self):
progress("Copying schemas to root directory")
dir_util.copy_tree(self.settings.appleseed_schemas_path, os.path.join(self.settings.root_dir, "appleseed", "schemas"))
safe_delete_file(os.path.join(self.settings.root_dir, "appleseed", "schemas", ".gitignore"))
def copy_shaders(self):
progress("Copying shaders to root directory")
# Create destination directory.
shaders_dir = os.path.join(self.settings.root_dir, "appleseed", "shaders")
safe_make_directory(shaders_dir)
osl_headers = ("stdosl.h", "oslutil.h", "as_osl_extensions.h")
for header in osl_headers:
shutil.copy(os.path.join(self.settings.appleseed_shaders_path, header), shaders_dir)
self.__do_copy_shaders(os.path.join(self.settings.appleseed_shaders_path, "appleseed"), shaders_dir)
self.__do_copy_shaders(os.path.join(self.settings.appleseed_shaders_path, "blenderseed"), shaders_dir)
def __do_copy_shaders(self, source_dir, target_dir):
for root, dirs, files in os.walk(source_dir):
for f in files:
if f.endswith(".oso"):
shutil.copy(os.path.join(root, f), target_dir)
def download_settings_files(self):
progress("Downloading settings files to root directory")
# Create destination directory.
settings_dir = os.path.join(self.settings.root_dir, "appleseed", "settings")
safe_make_directory(settings_dir)
for file in ["appleseed.cli.xml"]:
urllib.urlretrieve(
"https://raw.githubusercontent.com/appleseedhq/appleseed/master/sandbox/settings/{0}".format(file),
os.path.join(settings_dir, file))
def remove_pyc_files(self):
progress("Removing pyc files from root directory")
for root, dirs, files in os.walk(os.path.join(self.settings.root_dir, "appleseed")):
for f in files:
if f.endswith(".pyc"):
safe_delete_file(os.path.join(root, f))
def deploy_blenderseed_to_stage(self):
progress("Deploying blenderseed to staging directory")
shutil.copytree(self.settings.root_dir, "blenderseed", ignore=shutil.ignore_patterns("scripts"))
def clean_stage(self):
progress("Cleaning staging directory")
safe_delete_directory_recursively("blenderseed", "__pycache__")
for subdirectory in [".git", ".idea", "archives", "docs", "scripts", "tests", ".idea", ".vscode"]:
safe_delete_directory(os.path.join("blenderseed", subdirectory))
for file in [".gitignore", "README.md"]:
safe_delete_file(os.path.join("blenderseed", file))
def build_final_zip_file(self):
progress("Building final zip file from staging directory")
package_name = "blenderseed-{0}-{1}-{2}".format(self.package_version, self.settings.platform, self.build_date)
package_path = os.path.join(self.settings.output_dir, package_name)
archive_util.make_zipfile(package_path, "blenderseed")
info("Package path: {0}".format(package_path + ".zip"))
def remove_stage(self):
progress("Deleting staging directory")
safe_delete_directory("blenderseed")
def run(self, cmdline):
trace("Running command line: {0}".format(cmdline))
os.system(cmdline)
def run_subprocess(self, cmdline):
p = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
return p.returncode, out, err
#--------------------------------------------------------------------------------------------------
# Windows package builder.
#--------------------------------------------------------------------------------------------------
class WindowsPackageBuilder(PackageBuilder):
def copy_dependencies(self):
progress("Windows-specific: Copying dependencies")
bin_dir = self.settings.appleseed_bin_path
for dll in ["appleseed.dll"]:
shutil.copy(os.path.join(bin_dir, dll), os.path.join(self.settings.root_dir, "appleseed", "bin"))
def post_process_package(self):
pass
#--------------------------------------------------------------------------------------------------
# Mac package builder.
#--------------------------------------------------------------------------------------------------
class MacPackageBuilder(PackageBuilder):
SYSTEM_LIBS_PREFIXES = [
"/System/Library/",
"/usr/lib/libcurl",
"/usr/lib/libc++",
"/usr/lib/libbz2",
"/usr/lib/libSystem",
#"/usr/lib/libz",
"/usr/lib/libncurses",
"/usr/lib/libobjc.A.dylib"
]
def copy_dependencies(self):
progress("Mac-specific: Copying dependencies")
# Create destination directory.
lib_dir = os.path.join(self.settings.root_dir, "appleseed", "lib")
safe_make_directory(lib_dir)
# Copy appleseed libraries.
for lib in ["libappleseed.dylib"]:
shutil.copy(os.path.join(self.settings.appleseed_lib_path, lib), lib_dir)
# Get shared libs needed by binaries.
all_libs = set()
for bin in glob.glob(os.path.join(self.settings.root_dir, "appleseed", "bin", "*")):
libs = self.__get_dependencies_for_file(bin)
all_libs = all_libs.union(libs)
# Get shared libs needed by appleseed.python.
appleseedpython_libs = self.__get_dependencies_for_file(
os.path.join(self.settings.root_dir, "appleseed", "lib", "appleseed", "_appleseedpython3.so"))
all_libs = all_libs.union(appleseedpython_libs)
# Get shared libs needed by libraries.
# TODO: we're not computing the full transitive closure here!
lib_libs = set()
for lib in all_libs:
libs = self.__get_dependencies_for_file(lib)
lib_libs = lib_libs.union(libs)
all_libs = all_libs.union(lib_libs)
if True:
# Print dependencies.
trace(" Dependencies:")
for lib in all_libs:
trace(" {0}".format(lib))
# Copy needed libs to lib directory.
for lib in all_libs:
if True:
trace(" Copying {0} to {1}...".format(lib, lib_dir))
shutil.copy(lib, lib_dir)
def post_process_package(self):
progress("Mac-specific: Post-processing package")
self.__fixup_binaries()
def __fixup_binaries(self):
progress("Mac-specific: Fixing up binaries")
self.set_libraries_ids()
self.__change_library_paths_in_libraries()
self.__change_library_paths_in_executables()
def set_libraries_ids(self):
lib_dir = os.path.join(self.settings.root_dir, "appleseed", "lib")
for dirpath, dirnames, filenames in os.walk(lib_dir):
for filename in filenames:
ext = os.path.splitext(filename)[1]
if ext == ".dylib" or ext == ".so":
lib_path = os.path.join(dirpath, filename)
self.__set_library_id(lib_path, filename)
def __change_library_paths_in_libraries(self):
lib_dir = os.path.join(self.settings.root_dir, "appleseed", "lib")
for dirpath, dirnames, filenames in os.walk(lib_dir):
for filename in filenames:
ext = os.path.splitext(filename)[1]
if ext == ".dylib" or ext == ".so":
lib_path = os.path.join(dirpath, filename)
self.__change_library_paths_in_binary(lib_path)
def __change_library_paths_in_executables(self):
bin_dir = os.path.join(self.settings.root_dir, "appleseed", "bin")
for dirpath, dirnames, filenames in os.walk(bin_dir):
for filename in filenames:
ext = os.path.splitext(filename)[1]
if ext != ".py" and ext != ".conf":
exe_path = os.path.join(dirpath, filename)
self.__change_library_paths_in_binary(exe_path)
# Can be used on executables and dynamic libraries.
def __change_library_paths_in_binary(self, bin_path):
progress("Patching {0}".format(bin_path))
bin_dir = os.path.dirname(bin_path)
lib_dir = os.path.join(self.settings.root_dir, "appleseed", "lib")
path_to_appleseed_lib = os.path.relpath(lib_dir, bin_dir)
# fix_paths set to False because we must retrieve the unmodified dependency in order to replace it with the correct one.
for lib_path in self.__get_dependencies_for_file(bin_path, fix_paths=False):
lib_name = os.path.basename(lib_path)
if path_to_appleseed_lib == ".":
self.__change_library_path(bin_path, lib_path, "@loader_path/{0}".format(lib_name))
else:
self.__change_library_path(bin_path, lib_path, "@loader_path/{0}/{1}".format(path_to_appleseed_lib, lib_name))
def __set_library_id(self, target, name):
self.run('install_name_tool -id "{0}" {1}'.format(name, target))
def __change_library_path(self, target, old, new):
self.run('install_name_tool -change "{0}" "{1}" {2}'.format(old, new, target))
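# Illustrative example (hypothetical library name, not from this script): for a
# packaged binary appleseed/bin/appleseed.cli that links against
# /usr/local/lib/libfoo.dylib, the two helpers above end up issuing roughly:
#
#   install_name_tool -id "libfoo.dylib" <root>/appleseed/lib/libfoo.dylib
#   install_name_tool -change "/usr/local/lib/libfoo.dylib" \
#       "@loader_path/../lib/libfoo.dylib" <root>/appleseed/bin/appleseed.cli
#
# so the binary resolves its dependencies relative to its own location.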
def __get_dependencies_for_file(self, filepath, fix_paths=True):
filename = os.path.basename(filepath)
loader_path = os.path.dirname(filepath)
rpath = "/usr/local/lib/" # TODO: a great simplification
if True:
trace("Gathering dependencies for file")
trace(" {0}".format(filepath))
trace("with @loader_path set to")
trace(" {0}".format(loader_path))
trace("and @rpath hardcoded to")
trace(" {0}".format(rpath))
returncode, out, err = self.run_subprocess(["otool", "-L", filepath])
if returncode != 0:
fatal("Failed to invoke otool(1) to get dependencies for {0}: {1}".format(filepath, err))
libs = set()
for line in out.split("\n")[1:]: # skip the first line
line = line.strip()
# Ignore empty lines.
if len(line) == 0:
continue
# Parse the line.
m = re.match(r"(.*) \(compatibility version .*, current version .*\)", line)
if not m:
fatal("Failed to parse line from otool(1) output: " + line)
lib = m.group(1)
# Ignore self-references (why do these happen?).
if lib == filename:
continue
# Ignore system libs.
if self.__is_system_lib(lib):
continue
# Ignore Qt frameworks.
if re.search(r"Qt.*\.framework", lib):
continue
if fix_paths:
# Handle libs relative to @loader_path.
lib = lib.replace("@loader_path", loader_path)
# Handle libs relative to @rpath.
lib = lib.replace("@rpath", rpath)
# Try to handle other relative libs.
if not os.path.isabs(lib):
# TODO: generalize to a collection of user-specified search paths.
candidate = os.path.join(loader_path, lib)
if not os.path.exists(candidate):
candidate = os.path.join("/usr/local/lib/", lib)
if os.path.exists(candidate):
info("Resolved relative dependency {0} as {1}".format(lib, candidate))
lib = candidate
libs.add(lib)
if True:
trace("Dependencies for file {0}:".format(filepath))
for lib in libs:
if os.path.isfile(lib):
trace(u" {0} {1}".format(GREEN_CHECKMARK, lib))
else:
trace(u" {0} {1}".format(RED_CROSSMARK, lib))
# Don't check for missing dependencies if we didn't attempt to fix them.
if fix_paths:
for lib in libs:
if not os.path.isfile(lib):
fatal("Dependency {0} could not be found on disk".format(lib))
return libs
def __is_system_lib(self, lib):
for prefix in self.SYSTEM_LIBS_PREFIXES:
if lib.startswith(prefix):
return True
return False
#--------------------------------------------------------------------------------------------------
# Linux package builder.
#--------------------------------------------------------------------------------------------------
class LinuxPackageBuilder(PackageBuilder):
SYSTEM_LIBS_PREFIXES = [
"linux",
"librt",
"libpthread",
"libGL",
"libX",
"libselinux",
"libICE",
"libSM",
"libdl",
"libm.so",
"libgcc",
"libc.so",
"/lib64/ld-linux-",
"libstdc++",
"libxcb",
"libdrm",
"libnsl",
"libuuid",
"libgthread",
"libglib",
"libgobject",
"libglapi",
"libffi",
"libfontconfig",
"libutil",
"libpython",
"libxshmfence.so"
]
def plugin_extension(self):
return ".so"
def copy_dependencies(self):
progress("Linux-specific: Copying dependencies")
# Create destination directory.
lib_dir = os.path.join(self.settings.root_dir, "appleseed", "lib")
safe_make_directory(lib_dir)
# Copy appleseed libraries.
for lib in ["libappleseed.so", "libappleseed.shared.so"]:
shutil.copy(os.path.join(self.settings.appleseed_lib_path, lib), lib_dir)
# Get shared libs needed by binaries.
all_libs = set()
for bin in glob.glob(os.path.join(self.settings.root_dir, "appleseed", "bin", "*")):
libs = self.__get_dependencies_for_file(bin)
all_libs = all_libs.union(libs)
# Get shared libs needed by appleseed.python.
appleseedpython_libs = self.__get_dependencies_for_file(
os.path.join(self.settings.root_dir, "appleseed", "lib", "appleseed", "_appleseedpython3.so"))
all_libs = all_libs.union(appleseedpython_libs)
# Get shared libs needed by libraries.
lib_libs = set()
for lib in all_libs:
libs = self.__get_dependencies_for_file(lib)
lib_libs = lib_libs.union(libs)
all_libs = all_libs.union(lib_libs)
# Copy all shared libraries.
for lib in all_libs:
shutil.copy(lib, lib_dir)
def post_process_package(self):
progress("Linux-specific: Post-processing package")
for bin in glob.glob(os.path.join(self.settings.root_dir, "appleseed", "bin", "*")):
self.run("chrpath -r \$ORIGIN/../lib " + bin)
for lib in glob.glob(os.path.join(self.settings.root_dir, "appleseed", "lib", "*.so")):
self.run("chrpath -d " + lib)
appleseed_python_dir = os.path.join(self.settings.root_dir, "appleseed", "lib", "appleseed")
for py_cpp_module in glob.glob(os.path.join(appleseed_python_dir, "*.so")):
self.run("chrpath -r \$ORIGIN/../ " + py_cpp_module)
def __is_system_lib(self, lib):
for prefix in self.SYSTEM_LIBS_PREFIXES:
if lib.startswith(prefix):
return True
return False
def __get_dependencies_for_file(self, filepath):
returncode, out, err = self.run_subprocess(["ldd", filepath])
if returncode != 0:
fatal("Failed to invoke ldd(1) to get dependencies for {0}: {1}".format(filepath, err))
libs = set()
for line in out.split("\n"):
line = line.strip()
# Ignore empty lines.
if len(line) == 0:
continue
# Ignore system libs.
if self.__is_system_lib(line):
continue
# Ignore appleseed libs.
if "libappleseed" in line:
continue
libs.add(line.split()[2])
return libs
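# Example of an ldd(1) line handled by the loop above (hypothetical library):
#
#   libfoo.so.1 => /usr/local/lib/libfoo.so.1 (0x00007f1234567000)
#
# line.split()[2] picks out the resolved path "/usr/local/lib/libfoo.so.1".
# Entries without an "=>" target (the vDSO, the dynamic loader) are expected to be
# filtered out beforehand by the system-library prefix check.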
#--------------------------------------------------------------------------------------------------
# Entry point.
#--------------------------------------------------------------------------------------------------
def main():
colorama.init()
parser = argparse.ArgumentParser(description="build a blenderseed package from sources")
parser.add_argument("--nozip", action="store_true", help="copies appleseed binaries to blenderseed folder but does not build a release package")
args = parser.parse_args()
no_release = args.nozip
package_version = subprocess.Popen("git describe --long", stdout=subprocess.PIPE, shell=True).stdout.read().strip()
build_date = datetime.date.today().isoformat()
print("blenderseed.package version " + VERSION)
print("")
settings = Settings()
settings.load()
settings.print_summary()
if os.name == "nt":
package_builder = WindowsPackageBuilder(settings, package_version, build_date, no_release)
elif os.name == "posix" and platform.mac_ver()[0] != "":
package_builder = MacPackageBuilder(settings, package_version, build_date, no_release)
elif os.name == "posix" and platform.mac_ver()[0] == "":
package_builder = LinuxPackageBuilder(settings, package_version, build_date, no_release)
else:
fatal("Unsupported platform: " + os.name)
package_builder.build_package()
if __name__ == "__main__":
main()
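# Typical invocations (a usage sketch based on the argument parser above):
#
#   python blenderseed.package.py           # build and zip the release package
#   python blenderseed.package.py --nozip   # copy appleseed binaries only, no zip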
|
dictoon/blenderseed
|
scripts/blenderseed.package.py
|
Python
|
mit
| 26,912
|
[
"VisIt"
] |
8c4e125b5c2431111a0e6ebcea5b3a6870617ea7adf898110808bdf60b9cb543
|
# This wizard contributed by Ezequiel "Zac" Panepucci 011114
# modified by Warren L. DeLano
from pymol.wizard import Wizard
from pymol import cmd
import pymol
class Label(Wizard):
atom=None
messages=1
labeling=1
obj_name=None
def __init__(self,_self=cmd):
Wizard.__init__(self,_self)
self.cmd.unpick()
def get_prompt(self):
self.prompt = []
if (not self.messages):
return None
if (self.atom == None):
self.prompt = ['Click atoms...']
else:
if self.atom.chain == '':
self.prompt.append( '%s %s %s %s B = %.2f XYZ = %.3f %.3f %.3f' %
(self.obj_name,
self.atom.resn,
self.atom.resi,
self.atom.name,
self.atom.b,
self.atom.coord[0],
self.atom.coord[1],
self.atom.coord[2]) )
else:
self.prompt.append('%s %s %s%s %s B = %.2f XYZ = %.3f %.3f %.3f' %
(self.obj_name,
self.atom.resn,
self.atom.chain,
self.atom.resi,
self.atom.name,
self.atom.b,
self.atom.coord[0],
self.atom.coord[1],
self.atom.coord[2]) )
return self.prompt
def toggle_messages(self):
self.messages = not self.messages
def toggle_labeling(self):
self.labeling = not self.labeling
def get_panel(self):
return [
[ 1, 'Labeling',''],
[ 2, 'Toggle add/erase','cmd.get_wizard().toggle_labeling()'],
[ 2, 'Toggle messages','cmd.get_wizard().toggle_messages()'],
[ 2, 'Clear All','cmd.label()'],
[ 2, 'Done','cmd.set_wizard()'],
]
def do_pick(self,bondFlag):
self.obj_name = None
# if 'pk1' in cmd.get_names('selections'):
if cmd.count_atoms('pk1',1):
self.obj_name = cmd.identify('pk1',1)[0][0]
model = cmd.get_model("(pk1)")
self.atom = model.atom.pop()
if not self.labeling:
cmd.label("(pk1)", '""')
elif self.atom.name == 'CA':
cmd.label("(pk1)", '" %s %s" % (resn,resi)')
else:
cmd.label("(pk1)", '" %s %s" % (name,resi)')
cmd.unpick()
cmd.refresh_wizard()
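# How the pieces above fit together (a reading of the code, not original comments):
# PyMOL calls get_panel() to draw the wizard menu, get_prompt() to refresh the
# on-screen message, and do_pick() whenever an atom is clicked while the wizard is
# active. The wizard is typically started from the PyMOL command line, e.g.
# "wizard label" (assumed standard wizard loading; not shown in this file), and
# dismissed via the Done entry, which calls cmd.set_wizard() with no argument.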
|
gratefulfrog/lib
|
python/pymol/wizard/label.py
|
Python
|
gpl-2.0
| 2,783
|
[
"PyMOL"
] |
ac35ea0a93f948baf70e1614626eb3b1cde64aef712bd839b4efcfba3a047ecd
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import logging
import os
import yaml
from toscaparser.elements.interfaces import InterfacesDef
from toscaparser.functions import GetInput
from toscaparser.nodetemplate import NodeTemplate
from toscaparser.utils.gettextutils import _
SECTIONS = (TYPE, PROPERTIES, MEDADATA, DEPENDS_ON, UPDATE_POLICY,
DELETION_POLICY) = \
('type', 'properties', 'metadata',
'depends_on', 'update_policy', 'deletion_policy')
HEAT_TEMPLATE_VERSION = '2013-05-23'
HEAT_DESCRIPTION = 'Scaling template'
policy_type = ['tosca.policies.Placement',
'tosca.policies.Scaling',
'tosca.policies.Scaling.Cluster',
'tosca.policies.Monitoring',
'tosca.policies.Reservation',
'tosca.policies.nfv.InstantiationLevels',
'tosca.policies.nfv.ScalingAspects',
'tosca.policies.nfv.VduInitialDelta',
'tosca.policies.nfv.VduInstantiationLevels',
'tosca.policies.nfv.VduScalingAspectDeltas',
'tosca.policies.nfv.VirtualLinkInstantiationLevels']
log = logging.getLogger('heat-translator')
class HotResource(object):
'''Base class for TOSCA node type translation to Heat resource type.'''
def __init__(self, nodetemplate, name=None, type=None, properties=None,
metadata=None, depends_on=None,
update_policy=None, deletion_policy=None, csar_dir=None):
log.debug(_('Translating TOSCA node type to HOT resource type.'))
self.nodetemplate = nodetemplate
if name:
self.name = name
else:
self.name = nodetemplate.name
self.type = type
self.properties = properties or {}
self.csar_dir = csar_dir
# special case for HOT softwareconfig
cwd = os.getcwd()
if type == 'OS::Heat::SoftwareConfig':
config = self.properties.get('config')
if isinstance(config, dict):
if self.csar_dir:
os.chdir(self.csar_dir)
implementation_artifact = os.path.abspath(config.get(
'get_file'))
else:
implementation_artifact = config.get('get_file')
if implementation_artifact:
filename, file_extension = os.path.splitext(
implementation_artifact)
file_extension = file_extension.lower()
# artifact_types should be read to find the exact script
# type, unfortunately artifact_types doesn't seem to be
# supported by the parser
if file_extension == '.ansible' \
or file_extension == '.yaml' \
or file_extension == '.yml':
self.properties['group'] = 'ansible'
if file_extension == '.pp':
self.properties['group'] = 'puppet'
if self.properties.get('group') is None:
self.properties['group'] = 'script'
os.chdir(cwd)
self.metadata = metadata
# The difference between depends_on and depends_on_nodes is
# that depends_on defines dependency in the context of the
# HOT template and it is used during the template output.
# Depends_on_nodes defines the direct dependency between the
# tosca nodes and is not used during the output of the
# HOT template but for internal processing only. When a tosca
# node depends on another node it will always be added to
# depends_on_nodes but not always to depends_on. For example
# if the source of dependency is a server, the dependency will
# be added as properties.get_resource and not depends_on
if depends_on:
self.depends_on = depends_on
self.depends_on_nodes = depends_on
else:
self.depends_on = []
self.depends_on_nodes = []
self.update_policy = update_policy
self.deletion_policy = deletion_policy
self.group_dependencies = {}
# if hide_resource is set to true, then this resource will not be
# generated in the output yaml.
self.hide_resource = False
def handle_properties(self):
# the property can hold a value or the intrinsic function get_input
# for value, copy it
# for get_input, convert to get_param
for prop in self.nodetemplate.get_properties_objects():
pass
def handle_life_cycle(self):
hot_resources = []
deploy_lookup = {}
# TODO(anyone): sequence for life cycle needs to cover different
# scenarios and cannot be fixed or hard coded here
operations_deploy_sequence = ['create', 'configure', 'start']
operations = HotResource.get_all_operations(self.nodetemplate)
# create HotResource for each operation used for deployment:
# create, configure, start
# ignore the other operations
# observe the order: create, configure, start
# use the current HotResource for the first operation in this order
# hold the original name since it will be changed during
# the transformation
node_name = self.name
reserve_current = 'NONE'
for operation in operations_deploy_sequence:
if operation in operations.keys():
reserve_current = operation
break
# create the set of SoftwareDeployment and SoftwareConfig for
# the interface operations
hosting_server = None
if self.nodetemplate.requirements is not None:
hosting_server = self._get_hosting_server()
sw_deployment_resource = HOTSoftwareDeploymentResources(hosting_server)
server_key = sw_deployment_resource.server_key
servers = sw_deployment_resource.servers
sw_deploy_res = sw_deployment_resource.software_deployment
# hosting_server is None if requirements is None
hosting_on_server = hosting_server if hosting_server else None
base_type = HotResource.get_base_type_str(
self.nodetemplate.type_definition)
# if we are on a compute node the host is self
if hosting_on_server is None and base_type == 'tosca.nodes.Compute':
hosting_on_server = self.name
servers = {'get_resource': self.name}
cwd = os.getcwd()
for operation in operations.values():
if operation.name in operations_deploy_sequence:
config_name = node_name + '_' + operation.name + '_config'
deploy_name = node_name + '_' + operation.name + '_deploy'
if self.csar_dir:
os.chdir(self.csar_dir)
get_file = os.path.abspath(operation.implementation)
else:
get_file = operation.implementation
hot_resources.append(
HotResource(self.nodetemplate,
config_name,
'OS::Heat::SoftwareConfig',
{'config':
{'get_file': get_file}},
csar_dir=self.csar_dir))
if operation.name == reserve_current and \
base_type != 'tosca.nodes.Compute':
deploy_resource = self
self.name = deploy_name
self.type = sw_deploy_res
self.properties = {'config': {'get_resource': config_name},
server_key: servers}
deploy_lookup[operation] = self
else:
sd_config = {'config': {'get_resource': config_name},
server_key: servers}
deploy_resource = \
HotResource(self.nodetemplate,
deploy_name,
sw_deploy_res,
sd_config, csar_dir=self.csar_dir)
hot_resources.append(deploy_resource)
deploy_lookup[operation] = deploy_resource
lifecycle_inputs = self._get_lifecycle_inputs(operation)
if lifecycle_inputs:
deploy_resource.properties['input_values'] = \
lifecycle_inputs
os.chdir(cwd)
# Add dependencies for the set of HOT resources in the sequence defined
# in operations_deploy_sequence
# TODO(anyone): find some better way to encode this implicit sequence
group = {}
op_index_min = None
op_index_max = -1
for op, hot in deploy_lookup.items():
# position to determine potential preceding nodes
op_index = operations_deploy_sequence.index(op.name)
if op_index_min is None or op_index < op_index_min:
op_index_min = op_index
if op_index > op_index_max:
op_index_max = op_index
for preceding_op_name in \
reversed(operations_deploy_sequence[:op_index]):
preceding_hot = deploy_lookup.get(
operations.get(preceding_op_name))
if preceding_hot:
hot.depends_on.append(preceding_hot)
hot.depends_on_nodes.append(preceding_hot)
group[preceding_hot] = hot
break
if op_index_max >= 0:
last_deploy = deploy_lookup.get(operations.get(
operations_deploy_sequence[op_index_max]))
else:
last_deploy = None
# save this dependency chain in the set of HOT resources
self.group_dependencies.update(group)
for hot in hot_resources:
hot.group_dependencies.update(group)
roles_deploy_resource = self._handle_ansiblegalaxy_roles(
hot_resources, node_name, servers)
# add a dependency to this ansible roles deploy to
# the first "classic" deploy generated for this node
if roles_deploy_resource and op_index_min:
first_deploy = deploy_lookup.get(operations.get(
operations_deploy_sequence[op_index_min]))
first_deploy.depends_on.append(roles_deploy_resource)
first_deploy.depends_on_nodes.append(roles_deploy_resource)
return hot_resources, deploy_lookup, last_deploy
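# Illustrative outcome of handle_life_cycle() (hypothetical node name, derived only
# from the logic above): for a TOSCA node "webserver" whose Standard interface
# defines create and start operations, the method yields roughly:
#
#   webserver_create_config  OS::Heat::SoftwareConfig
#   webserver_create_deploy  OS::Heat::SoftwareDeployment[Group]  (this resource, renamed)
#   webserver_start_config   OS::Heat::SoftwareConfig
#   webserver_start_deploy   OS::Heat::SoftwareDeployment[Group]  (depends_on: webserver_create_deploy)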
def _handle_ansiblegalaxy_roles(self, hot_resources, initial_node_name,
hosting_on_server):
artifacts = self.get_all_artifacts(self.nodetemplate)
install_roles_script = ''
sw_deployment_resource = \
HOTSoftwareDeploymentResources(hosting_on_server)
server_key = sw_deployment_resource.server_key
sw_deploy_res = sw_deployment_resource.software_deployment
for artifact_name, artifact in artifacts.items():
artifact_type = artifact.get('type', '').lower()
if artifact_type == 'tosca.artifacts.ansiblegalaxy.role':
role = artifact.get('file', None)
if role:
install_roles_script += 'ansible-galaxy install ' + role \
+ '\n'
if install_roles_script:
# remove trailing \n
install_roles_script = install_roles_script[:-1]
# add shebang and | to use literal scalar type (for multiline)
install_roles_script = '|\n#!/bin/bash\n' + install_roles_script
config_name = initial_node_name + '_install_roles_config'
deploy_name = initial_node_name + '_install_roles_deploy'
hot_resources.append(
HotResource(self.nodetemplate, config_name,
'OS::Heat::SoftwareConfig',
{'config': install_roles_script},
csar_dir=self.csar_dir))
sd_config = {'config': {'get_resource': config_name},
server_key: hosting_on_server}
deploy_resource = \
HotResource(self.nodetemplate, deploy_name,
sw_deploy_res,
sd_config, csar_dir=self.csar_dir)
hot_resources.append(deploy_resource)
return deploy_resource
def handle_connectsto(self, tosca_source, tosca_target, hot_source,
hot_target, config_location, operation):
# The ConnectsTo relationship causes a configuration operation in
# the target.
# This hot resource is the software config portion in the HOT template
# This method adds the matching software deployment with the proper
# target server and dependency
if config_location == 'target':
hosting_server = hot_target._get_hosting_server()
hot_depends = hot_target
elif config_location == 'source':
hosting_server = self._get_hosting_server()
hot_depends = hot_source
sw_deployment_resource = HOTSoftwareDeploymentResources(hosting_server)
server_key = sw_deployment_resource.server_key
servers = sw_deployment_resource.servers
sw_deploy_res = sw_deployment_resource.software_deployment
deploy_name = tosca_source.name + '_' + tosca_target.name + \
'_connect_deploy'
sd_config = {'config': {'get_resource': self.name},
server_key: servers}
deploy_resource = \
HotResource(self.nodetemplate,
deploy_name,
sw_deploy_res,
sd_config,
depends_on=[hot_depends], csar_dir=self.csar_dir)
connect_inputs = self._get_connect_inputs(config_location, operation)
if connect_inputs:
deploy_resource.properties['input_values'] = connect_inputs
return deploy_resource
def handle_expansion(self):
pass
def handle_hosting(self):
# handle hosting server for the OS::Heat::SoftwareDeployment
# from the TOSCA nodetemplate, traverse the relationship chain
# down to the server
sw_deploy_group = \
HOTSoftwareDeploymentResources.HOT_SW_DEPLOYMENT_GROUP_RESOURCE
sw_deploy = HOTSoftwareDeploymentResources.HOT_SW_DEPLOYMENT_RESOURCE
if self.properties.get('servers') and \
self.properties.get('server'):
del self.properties['server']
if self.type == sw_deploy_group or self.type == sw_deploy:
# skip if already have hosting
# If type is NodeTemplate, look up corresponding HotResource
host_server = self.properties.get('servers') \
or self.properties.get('server')
if host_server is None:
raise Exception(_("Internal Error: expecting host "
"in software deployment"))
elif isinstance(host_server.get('get_resource'), NodeTemplate):
self.properties['server']['get_resource'] = \
host_server['get_resource'].name
elif isinstance(host_server, dict) and \
not host_server.get('get_resource'):
self.properties['servers'] = \
host_server
def top_of_chain(self):
dependent = self.group_dependencies.get(self)
if dependent is None:
return self
else:
return dependent.top_of_chain()
# this function allows providing substacks as external files;
# those files will be dumped alongside the output file.
#
# return a dict of filename-content
def extract_substack_templates(self, base_filename, hot_template_version):
return {}
# this function asks the resource to embed substacks
# into the main template, if any.
# this is used when the final output is stdout
def embed_substack_templates(self, hot_template_version):
pass
def get_dict_output(self):
resource_sections = OrderedDict()
resource_sections[TYPE] = self.type
if self.properties:
resource_sections[PROPERTIES] = self.properties
if self.metadata:
resource_sections[MEDADATA] = self.metadata
if self.depends_on:
resource_sections[DEPENDS_ON] = []
for depend in self.depends_on:
resource_sections[DEPENDS_ON].append(depend.name)
if self.update_policy:
resource_sections[UPDATE_POLICY] = self.update_policy
if self.deletion_policy:
resource_sections[DELETION_POLICY] = self.deletion_policy
return {self.name: resource_sections}
def _get_lifecycle_inputs(self, operation):
# check if this lifecycle operation has input values specified
# extract and convert to HOT format
if isinstance(operation.value, str):
# the operation has a static string
return {}
# the operation is a dict {'implementation': xxx, 'inputs': yyy}
inputs = operation.value.get('inputs')
deploy_inputs = {}
if inputs:
for name, value in inputs.items():
deploy_inputs[name] = value
return deploy_inputs
def _get_connect_inputs(self, config_location, operation):
if config_location == 'target':
inputs = operation.get('pre_configure_target').get('inputs')
elif config_location == 'source':
inputs = operation.get('pre_configure_source').get('inputs')
deploy_inputs = {}
if inputs:
for name, value in inputs.items():
deploy_inputs[name] = value
return deploy_inputs
def _get_hosting_server(self, node_template=None):
# find the server that hosts this software by checking the
# requirements and following the hosting chain
hosting_servers = []
host_exists = False
this_node_template = self.nodetemplate \
if node_template is None else node_template
for requirement in this_node_template.requirements:
for requirement_name, assignment in requirement.items():
for check_node in this_node_template.related_nodes:
# check if the capability is Container
if isinstance(assignment, dict):
node_name = assignment.get('node')
else:
node_name = assignment
if node_name and node_name == check_node.name:
if self._is_container_type(requirement_name,
check_node):
hosting_servers.append(check_node.name)
host_exists = True
elif check_node.related_nodes and not host_exists:
return self._get_hosting_server(check_node)
if hosting_servers:
return hosting_servers
return None
def _is_container_type(self, requirement_name, node):
# capability is a list of dict
# For now just check if it's type tosca.nodes.Compute
# TODO(anyone): match up requirement and capability
base_type = HotResource.get_base_type_str(node.type_definition)
if base_type == 'tosca.nodes.Compute':
return True
else:
return False
def get_hot_attribute(self, attribute, args):
# this is a place holder and should be implemented by the subclass
# if translation is needed for the particular attribute
raise Exception(_("No translation in TOSCA type {0} for attribute "
"{1}").format(self.nodetemplate.type, attribute))
def get_tosca_props(self):
tosca_props = {}
for prop in self.nodetemplate.get_properties_objects():
if isinstance(prop.value, GetInput):
tosca_props[prop.name] = {'get_param': prop.value.input_name}
else:
tosca_props[prop.name] = prop.value
return tosca_props
def represent_ordereddict(self, dumper, data):
nodes = []
for key, value in data.items():
node_key = dumper.represent_data(key)
node_value = dumper.represent_data(value)
nodes.append((node_key, node_value))
return yaml.nodes.MappingNode('tag:yaml.org,2002:map', nodes)
def _handle_nested_template(self, scale_res, yaml_name,
hot_template_parameters,
parameters=None):
template_dict = OrderedDict()
template_dict['heat_template_version'] = HEAT_TEMPLATE_VERSION
template_dict['description'] = HEAT_DESCRIPTION
if parameters is not None:
template_dict['parameters'] = parameters
if hot_template_parameters:
all_params = OrderedDict()
for parameter in hot_template_parameters:
all_params.update(parameter.get_dict_output())
template_dict.update({'parameters': all_params})
template_dict["resources"] = {}
dict_res = OrderedDict()
for res in scale_res:
dict_res = res.get_dict_output()
res_name = list(dict_res.keys())[0]
template_dict["resources"][res_name] = \
dict_res[res_name]
yaml.add_representer(OrderedDict, self.represent_ordereddict)
yaml.add_representer(dict, self.represent_ordereddict)
yaml_string = yaml.dump(template_dict, default_flow_style=False)
yaml_string = yaml_string.replace('\'', '').replace('\n\n', '\n')
nested_template = {
yaml_name: yaml_string
}
return nested_template
def remove_depends_on(self, depends_on_set):
# Remove all depends_on including depends_on_set.
for rel, node in self.nodetemplate.relationships.items():
for do in depends_on_set:
if rel.is_derived_from(do):
for hot_resource in self.depends_on_nodes:
if node.name == hot_resource.name and \
hot_resource in self.depends_on:
self.depends_on.remove(hot_resource)
break
@staticmethod
def get_all_artifacts(nodetemplate):
# workaround bug in the parser
base_type = HotResource.get_base_type_str(nodetemplate.type_definition)
if base_type in policy_type:
artifacts = {}
else:
artifacts = nodetemplate.type_definition.get_value('artifacts',
parent=True)
if not artifacts:
artifacts = {}
tpl_artifacts = nodetemplate.entity_tpl.get('artifacts')
if tpl_artifacts:
artifacts.update(tpl_artifacts)
return artifacts
@staticmethod
def get_all_operations(node):
operations = {}
for operation in node.interfaces:
operations[operation.name] = operation
# workaround bug in the parser
base_type = HotResource.get_base_type_str(node.type_definition)
if base_type in policy_type:
return operations
node_type = node.type_definition
while True:
type_operations = HotResource._get_interface_operations_from_type(
node_type, node, 'Standard')
type_operations.update(operations)
operations = type_operations
if node_type.parent_type is not None:
node_type = node_type.parent_type
else:
return operations
@staticmethod
def _get_interface_operations_from_type(node_type, node, lifecycle_name):
operations = {}
base_type = HotResource.get_base_type_str(node_type)
if base_type in policy_type:
return operations
if node_type.interfaces and lifecycle_name in node_type.interfaces:
for name, elems in node_type.interfaces[lifecycle_name].items():
# ignore empty operations (only type)
# ignore global interface inputs,
# concrete inputs are on the operations themselves
if name != 'type' and name != 'inputs':
operations[name] = InterfacesDef(node_type,
lifecycle_name,
node, name, elems)
return operations
@staticmethod
def get_base_type_str(node_type):
if isinstance(node_type, str):
return node_type
if node_type.parent_type is not None:
parent_type_str = None
if isinstance(node_type.parent_type, str):
parent_type_str = node_type.parent_type
else:
parent_type_str = node_type.parent_type.type
if parent_type_str and parent_type_str.endswith('.Root'):
return node_type.type
return HotResource.get_base_type_str(node_type.parent_type)
return node_type.type
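# Worked example for get_base_type_str() (hypothetical type names): given a node
# type "tosca.nodes.SoftwareComponent.Collectd" derived from
# "tosca.nodes.SoftwareComponent", which itself derives from "tosca.nodes.Root",
# the recursion stops once the parent ends with ".Root" and returns
# "tosca.nodes.SoftwareComponent".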
class HOTSoftwareDeploymentResources(object):
"""Provides HOT Software Deployment resources
SoftwareDeployment or SoftwareDeploymentGroup Resource
"""
HOT_SW_DEPLOYMENT_RESOURCE = 'OS::Heat::SoftwareDeployment'
HOT_SW_DEPLOYMENT_GROUP_RESOURCE = 'OS::Heat::SoftwareDeploymentGroup'
def __init__(self, hosting_server=None):
self.software_deployment = self.HOT_SW_DEPLOYMENT_RESOURCE
self.software_deployment_group = self.HOT_SW_DEPLOYMENT_GROUP_RESOURCE
self.server_key = 'server'
self.hosting_server = hosting_server
self.servers = {}
if hosting_server is not None:
if len(self.hosting_server) == 1:
if isinstance(hosting_server, list):
self.servers['get_resource'] = self.hosting_server[0]
else:
for server in self.hosting_server:
self.servers[server] = {'get_resource': server}
self.software_deployment = self.software_deployment_group
self.server_key = 'servers'
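# Sketch of the resulting attributes (follows directly from __init__ above,
# hypothetical server names):
#
#   HOTSoftwareDeploymentResources(['server_1'])
#       software_deployment = 'OS::Heat::SoftwareDeployment'
#       server_key          = 'server'
#       servers             = {'get_resource': 'server_1'}
#
#   HOTSoftwareDeploymentResources(['server_1', 'server_2'])
#       software_deployment = 'OS::Heat::SoftwareDeploymentGroup'
#       server_key          = 'servers'
#       servers             = {'server_1': {'get_resource': 'server_1'},
#                              'server_2': {'get_resource': 'server_2'}}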
|
openstack/heat-translator
|
translator/hot/syntax/hot_resource.py
|
Python
|
apache-2.0
| 27,182
|
[
"Galaxy"
] |
f7a65b708a5a7609ebe3da3cae22bac401539fc465e92e814767dc21c8a4cceb
|
#pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
from RenderWindow import RenderWindow
import base
import utils
import misc
import annotations
import exodus
import geometric
import graphs
import filters
|
Chuban/moose
|
python/chigger/__init__.py
|
Python
|
lgpl-2.1
| 1,047
|
[
"MOOSE"
] |
0a7335eb0d28ecbc57c2eec2d93abc5460ec56b9bb3df75f1836076b8c833b59
|
"""
API for initiating and tracking requests for credit from a provider.
"""
import datetime
import logging
import pytz
import uuid
from django.db import transaction
from openedx.core.djangoapps.credit.exceptions import (
UserIsNotEligible,
CreditProviderNotConfigured,
RequestAlreadyCompleted,
CreditRequestNotFound,
InvalidCreditStatus,
)
from openedx.core.djangoapps.credit.models import (
CreditProvider,
CreditRequirementStatus,
CreditRequest,
CreditEligibility,
)
from openedx.core.djangoapps.credit.signature import signature, get_shared_secret_key
from student.models import User
from util.date_utils import to_timestamp
log = logging.getLogger(__name__)
def get_credit_providers():
"""
Retrieve all available credit providers.
Example:
>>> get_credit_providers()
[
{
"id": "hogwarts",
"display_name": "Hogwarts School of Witchcraft and Wizardry"
},
...
]
Returns: list
"""
return CreditProvider.get_credit_providers()
def get_credit_provider_info(provider_id):
"""Retrieve the 'CreditProvider' model data against provided
credit provider.
Args:
provider_id (str): The identifier for the credit provider
Returns: 'CreditProvider' data dictionary
Example Usage:
>>> get_credit_provider_info("hogwarts")
{
"provider_id": "hogwarts",
"display_name": "Hogwarts School of Witchcraft and Wizardry",
"provider_url": "https://credit.example.com/",
"provider_status_url": "https://credit.example.com/status/",
"provider_description: "A new model for the Witchcraft and Wizardry School System.",
"enable_integration": False,
"fulfillment_instructions": "
<p>In order to fulfill credit, Hogwarts School of Witchcraft and Wizardry requires learners to:</p>
<ul>
<li>Sample instruction abc</li>
<li>Sample instruction xyz</li>
</ul>",
}
"""
credit_provider = CreditProvider.get_credit_provider(provider_id=provider_id)
credit_provider_data = {}
if credit_provider:
credit_provider_data = {
"provider_id": credit_provider.provider_id,
"display_name": credit_provider.display_name,
"provider_url": credit_provider.provider_url,
"provider_status_url": credit_provider.provider_status_url,
"provider_description": credit_provider.provider_description,
"enable_integration": credit_provider.enable_integration,
"fulfillment_instructions": credit_provider.fulfillment_instructions
}
return credit_provider_data
@transaction.commit_on_success
def create_credit_request(course_key, provider_id, username):
"""
Initiate a request for credit from a credit provider.
This will return the parameters that the user's browser will need to POST
to the credit provider. It does NOT calculate the signature.
Only users who are eligible for credit (have satisfied all credit requirements) are allowed to make requests.
A provider can be configured either with *integration enabled* or not.
If automatic integration is disabled, this method will simply return
a URL to the credit provider and method set to "GET", so the student can
visit the URL and request credit directly. No database record will be created
to track these requests.
If automatic integration *is* enabled, then this will also return the parameters
that the user's browser will need to POST to the credit provider.
These parameters will be digitally signed using a secret key shared with the credit provider.
A database record will be created to track the request with a 32-character UUID.
The returned dictionary can be used by the user's browser to send a POST request to the credit provider.
If a pending request already exists, this function should return a request description with the same UUID.
(Other parameters, such as the user's full name, may differ from those in the original request.)
If a completed request (either accepted or rejected) already exists, this function will
raise an exception. Users are not allowed to make additional requests once a request
has been completed.
Arguments:
course_key (CourseKey): The identifier for the course.
provider_id (str): The identifier of the credit provider.
username (str): The user initiating the request.
Returns: dict
Raises:
UserIsNotEligible: The user has not satisfied eligibility requirements for credit.
CreditProviderNotConfigured: The credit provider has not been configured for this course.
RequestAlreadyCompleted: The user has already submitted a request and received a response
from the credit provider.
Example Usage:
>>> create_credit_request(course.id, "hogwarts", "ron")
{
"url": "https://credit.example.com/request",
"method": "POST",
"parameters": {
"request_uuid": "557168d0f7664fe59097106c67c3f847",
"timestamp": 1434631630,
"course_org": "HogwartsX",
"course_num": "Potions101",
"course_run": "1T2015",
"final_grade": 0.95,
"user_username": "ron",
"user_email": "ron@example.com",
"user_full_name": "Ron Weasley",
"user_mailing_address": "",
"user_country": "US",
"signature": "cRCNjkE4IzY+erIjRwOQCpRILgOvXx4q2qvx141BCqI="
}
}
"""
try:
user_eligibility = CreditEligibility.objects.select_related('course').get(
username=username,
course__course_key=course_key
)
credit_course = user_eligibility.course
credit_provider = CreditProvider.objects.get(provider_id=provider_id)
except CreditEligibility.DoesNotExist:
log.warning(
u'User "%s" tried to initiate a request for credit in course "%s", '
u'but the user is not eligible for credit',
username, course_key
)
raise UserIsNotEligible
except CreditProvider.DoesNotExist:
log.error(u'Credit provider with ID "%s" has not been configured.', provider_id)
raise CreditProviderNotConfigured
# Check if we've enabled automatic integration with the credit
# provider. If not, we'll show the user a link to a URL
# where the user can request credit directly from the provider.
# Note that we do NOT track these requests in our database,
# since the state would always be "pending" (we never hear back).
if not credit_provider.enable_integration:
return {
"url": credit_provider.provider_url,
"method": "GET",
"parameters": {}
}
else:
# If automatic credit integration is enabled, then try
# to retrieve the shared signature *before* creating the request.
# That way, if there's a misconfiguration, we won't have requests
# in our system that we know weren't sent to the provider.
shared_secret_key = get_shared_secret_key(credit_provider.provider_id)
if shared_secret_key is None:
msg = u'Credit provider with ID "{provider_id}" does not have a secret key configured.'.format(
provider_id=credit_provider.provider_id
)
log.error(msg)
raise CreditProviderNotConfigured(msg)
# Initiate a new request if one has not already been created
credit_request, created = CreditRequest.objects.get_or_create(
course=credit_course,
provider=credit_provider,
username=username,
)
# Check whether we've already gotten a response for a request.
# If so, we're not allowed to issue any further requests.
# Skip checking the status if we know that we just created this record.
if not created and credit_request.status != "pending":
log.warning(
(
u'Cannot initiate credit request because the request with UUID "%s" '
u'exists with status "%s"'
), credit_request.uuid, credit_request.status
)
raise RequestAlreadyCompleted
if created:
credit_request.uuid = uuid.uuid4().hex
# Retrieve user account and profile info
user = User.objects.select_related('profile').get(username=username)
# Retrieve the final grade from the eligibility table
try:
final_grade = CreditRequirementStatus.objects.get(
username=username,
requirement__namespace="grade",
requirement__name="grade",
status="satisfied"
).reason["final_grade"]
except (CreditRequirementStatus.DoesNotExist, TypeError, KeyError):
log.exception(
"Could not retrieve final grade from the credit eligibility table "
"for user %s in course %s.",
user.id, course_key
)
raise UserIsNotEligible
parameters = {
"request_uuid": credit_request.uuid,
"timestamp": to_timestamp(datetime.datetime.now(pytz.UTC)),
"course_org": course_key.org,
"course_num": course_key.course,
"course_run": course_key.run,
"final_grade": final_grade,
"user_username": user.username,
"user_email": user.email,
"user_full_name": user.profile.name,
"user_mailing_address": (
user.profile.mailing_address
if user.profile.mailing_address is not None
else ""
),
"user_country": (
user.profile.country.code
if user.profile.country.code is not None
else ""
),
}
credit_request.parameters = parameters
credit_request.save()
if created:
log.info(u'Created new request for credit with UUID "%s"', credit_request.uuid)
else:
log.info(
u'Updated request for credit with UUID "%s" so the user can re-issue the request',
credit_request.uuid
)
# Sign the parameters using a secret key we share with the credit provider.
parameters["signature"] = signature(parameters, shared_secret_key)
return {
"url": credit_provider.provider_url,
"method": "POST",
"parameters": parameters
}
def update_credit_request_status(request_uuid, provider_id, status):
"""
Update the status of a credit request.
Approve or reject a request for a student to receive credit in a course
from a particular credit provider.
This function does NOT check that the status update is authorized.
The caller needs to handle authentication and authorization (checking the signature
of the message received from the credit provider)
The function is idempotent; if the request has already been updated to the status,
the function does nothing.
Arguments:
request_uuid (str): The unique identifier for the credit request.
provider_id (str): Identifier for the credit provider.
status (str): Either "approved" or "rejected"
Returns: None
Raises:
CreditRequestNotFound: No request exists that is associated with the given provider.
InvalidCreditStatus: The status is not either "approved" or "rejected".
"""
if status not in [CreditRequest.REQUEST_STATUS_APPROVED, CreditRequest.REQUEST_STATUS_REJECTED]:
raise InvalidCreditStatus
try:
request = CreditRequest.objects.get(uuid=request_uuid, provider__provider_id=provider_id)
old_status = request.status
request.status = status
request.save()
log.info(
u'Updated request with UUID "%s" from status "%s" to "%s" for provider with ID "%s".',
request_uuid, old_status, status, provider_id
)
except CreditRequest.DoesNotExist:
msg = (
u'Credit provider with ID "{provider_id}" attempted to '
u'update request with UUID "{request_uuid}", but no request '
u'with this UUID is associated with the provider.'
).format(provider_id=provider_id, request_uuid=request_uuid)
log.warning(msg)
raise CreditRequestNotFound(msg)
def get_credit_requests_for_user(username):
"""
Retrieve the user's credit requests.
Each request's status is either "pending", "approved", or "rejected".
Arguments:
username (unicode): The username of the user who initiated the requests.
Returns: list
Example Usage:
>>> get_credit_request_status_for_user("bob")
[
{
"uuid": "557168d0f7664fe59097106c67c3f847",
"timestamp": 1434631630,
"course_key": "course-v1:HogwartsX+Potions101+1T2015",
"provider": {
"id": "HogwartsX",
"display_name": "Hogwarts School of Witchcraft and Wizardry",
},
"status": "pending" # or "approved" or "rejected"
}
]
"""
return CreditRequest.credit_requests_for_user(username)
def get_credit_request_status(username, course_key):
"""Get the credit request status.
This function returns the status of the user's credit request for the given course.
It returns the latest request status for any credit provider.
The valid statuses are 'pending', 'approved' or 'rejected'.
Args:
username(str): The username of user
course_key(CourseKey): The course locator key
Returns:
A dictionary of credit request user has made if any
"""
credit_request = CreditRequest.get_user_request_status(username, course_key)
return {
"uuid": credit_request.uuid,
"timestamp": credit_request.modified,
"course_key": credit_request.course.course_key,
"provider": {
"id": credit_request.provider.provider_id,
"display_name": credit_request.provider.display_name
},
"status": credit_request.status
} if credit_request else {}
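# End-to-end flow of the API above (a summary, not an additional interface):
#   1. create_credit_request() assembles -- and, when provider integration is
#      enabled, signs -- the parameters the learner's browser POSTs to the provider.
#   2. The provider responds out of band; the view receiving that callback checks
#      the signature and calls update_credit_request_status() with "approved" or
#      "rejected".
#   3. get_credit_requests_for_user() and get_credit_request_status() expose the
#      stored request state.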
|
martynovp/edx-platform
|
openedx/core/djangoapps/credit/api/provider.py
|
Python
|
agpl-3.0
| 14,252
|
[
"VisIt"
] |
1ba96380a1675466e15d92715f2e9c4bab4c4840f08b40cc7d783fddf5f365d2
|
#!/galaxy/home/mgehrin/hiclib/bin/python
"""
Script to select random lines from a file. Reads entire file into
memory!
TODO: Replace this with a more elegant implementation.
"""
import sys
import random
ndesired = int( sys.argv[1] )
for line in random.sample( sys.stdin.readlines(), ndesired ):
print line,
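# Usage sketch (hypothetical file names): pick 1000 random lines from a file.
#
#   python random_lines.py 1000 < reads.txt > reads_sample.txt
#
# As the docstring warns, all of stdin is read into memory before sampling.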
|
bxlab/HiFive_Paper
|
Scripts/HiCLib/bx-python-0.7.1/build/scripts-2.7/random_lines.py
|
Python
|
bsd-3-clause
| 316
|
[
"Galaxy"
] |
8a1ae7ea3e965f77bef883917321fecc5e60761a0280a0ddcc6ef914c3b56ab1
|
import os
import math
import torch
import shutil
import numpy as np
import matplotlib.pyplot as plt
import pyprob
from pyprob import Model
from pyprob.distributions import Uniform, Normal
class GaussianWithUnknownMean(Model):
def __init__(self):
super().__init__('Gaussian with unknown mean')
self.prior_mean = 0.
self.prior_stddev = 1.
self.likelihood_stddev = math.sqrt(0.2)
self.prior_true = Normal(self.prior_mean, self.prior_stddev)
def posterior_true(self, obs):
n = len(obs)
posterior_var = 1/(n/self.likelihood_stddev**2 + 1/self.prior_stddev**2)
posterior_mu = posterior_var * (self.prior_mean/self.prior_stddev**2 + n*np.mean(obs)/self.likelihood_stddev**2)
return Normal(posterior_mu, math.sqrt(posterior_var))
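# The closed form above is the standard Normal-Normal conjugate update: with
# prior mu ~ N(mu_0, s0^2) and likelihood obs_i ~ N(mu, s^2) for n observations,
#
#   posterior variance = 1 / (n/s^2 + 1/s0^2)
#   posterior mean     = posterior_variance * (mu_0/s0^2 + n*mean(obs)/s^2)
#
# which is exactly what posterior_var and posterior_mu compute.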
def rejection_sampling(self):
u = pyprob.sample(Uniform(0, 1), control=False)
if u > 0.5:
while True:
x = pyprob.sample(Normal(self.prior_mean, self.prior_stddev * 4), replace=True)
u2 = pyprob.sample(Uniform(0, 1), control=False)
if x < 0 and u2 < 0.25 * torch.exp(Normal(self.prior_mean, self.prior_stddev).log_prob(x) - Normal(self.prior_mean, self.prior_stddev*4).log_prob(x)):
return x
else:
while True:
x = pyprob.sample(Normal(self.prior_mean, self.prior_stddev), replace=True)
if x >= 0:
return x
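# Note on rejection_sampling() (a reading of the code, not original comments): each
# branch is a rejection sampler for one half of the prior N(prior_mean,
# prior_stddev) -- the first for x < 0 with a 4x-wider normal as proposal (M = 4),
# the second for x >= 0 by redrawing from the prior itself. Mixing the halves with
# probability 0.5 each recovers the full prior, so mu is effectively drawn from
# N(0, 1); the roundabout construction exists to exercise pyprob's handling of
# uncontrolled draws (control=False) and rejection-sampling loops (replace=True).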
def forward(self):
mu = self.rejection_sampling()
likelihood = Normal(mu, self.likelihood_stddev)
pyprob.observe(likelihood, name='obs0')
return mu
def produce_results(results_dir):
infer_traces = 2500
train_traces = 10000
model = GaussianWithUnknownMean()
if os.path.exists(results_dir):
shutil.rmtree(results_dir)
pyprob.util.create_path(results_dir, directory=True)
prior_samples = model.prior_results(num_traces=infer_traces)
fig = plt.figure(figsize=(10, 5))
model.prior_true.plot(label='True prior', min_val=-5, max_val=5, show=False, fig=fig)
prior_samples.plot_histogram(label='Empirical prior', alpha=0.75, show=False, bins=50, fig=fig)
plt.legend()
prior_plot_file_name = os.path.join(results_dir, 'prior.pdf')
plt.savefig(prior_plot_file_name)
is_posterior_samples = model.posterior_results(num_traces=infer_traces, inference_engine=pyprob.InferenceEngine.IMPORTANCE_SAMPLING, observe={'obs0': 0.})
fig = plt.figure(figsize=(10, 5))
model.posterior_true([0.]).plot(label='True posterior', min_val=-5, max_val=5, show=False, fig=fig)
is_posterior_samples.unweighted().plot_histogram(label='Empirical proposal', alpha=0.75, show=False, bins=50, fig=fig)
is_posterior_samples.plot_histogram(label='Empirical posterior', alpha=0.75, show=False, bins=50, fig=fig)
plt.legend()
is_posterior_plot_file_name = os.path.join(results_dir, 'posterior_IS.pdf')
plt.savefig(is_posterior_plot_file_name)
model.learn_inference_network(num_traces=train_traces, observe_embeddings={'obs0' : {'dim' : 32}}, inference_network=pyprob.InferenceNetwork.LSTM)
ic_iw0_posterior_samples = model.posterior_results(num_traces=infer_traces, inference_engine=pyprob.InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK, observe={'obs0': 0.}, importance_weighting=pyprob.ImportanceWeighting.IW0)
fig = plt.figure(figsize=(10, 5))
model.posterior_true([0.]).plot(label='True posterior', min_val=-5, max_val=5, show=False, fig=fig)
ic_iw0_posterior_samples.unweighted().plot_histogram(label='Empirical proposal', alpha=0.75, show=False, bins=50, fig=fig)
ic_iw0_posterior_samples.plot_histogram(label='Empirical posterior', alpha=0.75, show=False, bins=50, fig=fig)
plt.legend()
ic_iw0_posterior_plot_file_name = os.path.join(results_dir, 'posterior_IC_IW0.pdf')
plt.savefig(ic_iw0_posterior_plot_file_name)
ic_iw1_posterior_samples = model.posterior_results(num_traces=infer_traces, inference_engine=pyprob.InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK, observe={'obs0': 0.}, importance_weighting=pyprob.ImportanceWeighting.IW1)
fig = plt.figure(figsize=(10, 5))
model.posterior_true([0.]).plot(label='True posterior', min_val=-5, max_val=5, show=False, fig=fig)
ic_iw1_posterior_samples.unweighted().plot_histogram(label='Empirical proposal', alpha=0.75, show=False, bins=50, fig=fig)
ic_iw1_posterior_samples.plot_histogram(label='Empirical posterior', alpha=0.75, show=False, bins=50, fig=fig)
plt.legend()
ic_iw1_posterior_plot_file_name = os.path.join(results_dir, 'posterior_IC_IW1.pdf')
plt.savefig(ic_iw1_posterior_plot_file_name)
if __name__ == '__main__':
pyprob.set_random_seed(1)
current_dir = os.path.dirname(os.path.abspath(__file__))
print('Current dir: {}'.format(current_dir))
results_dir = os.path.join(current_dir, 'rejection_sampling')
produce_results(results_dir=results_dir)
print('Done')
|
probprog/pyprob
|
tests/extra/inference_compilation/rejection_sampling.py
|
Python
|
bsd-2-clause
| 5,023
|
[
"Gaussian"
] |
33e6ea6b286c01afbb6d987227232442894a0d2548420c4f587770c6f3800a33
|
"""User-friendly public interface to polynomial functions. """
from __future__ import print_function, division
from sympy.core import (
S, Basic, Expr, I, Integer, Add, Mul, Dummy, Tuple
)
from sympy.core.mul import _keep_coeff
from sympy.core.symbol import Symbol
from sympy.core.basic import preorder_traversal
from sympy.core.relational import Relational
from sympy.core.sympify import sympify
from sympy.core.decorators import _sympifyit
from sympy.core.function import Derivative
from sympy.core.compatibility import as_int, SYMPY_INTS
from sympy.logic.boolalg import BooleanAtom
from sympy.polys.polyclasses import DMP
from sympy.polys.polyutils import (
basic_from_dict,
_sort_gens,
_unify_gens,
_dict_reorder,
_dict_from_expr,
_parallel_dict_from_expr,
)
from sympy.polys.rationaltools import together
from sympy.polys.rootisolation import dup_isolate_real_roots_list
from sympy.polys.groebnertools import groebner as _groebner
from sympy.polys.fglmtools import matrix_fglm
from sympy.polys.monomials import Monomial
from sympy.polys.orderings import monomial_key
from sympy.polys.polyerrors import (
OperationNotSupported, DomainError,
CoercionFailed, UnificationFailed,
GeneratorsNeeded, PolynomialError,
MultivariatePolynomialError,
ExactQuotientFailed,
PolificationFailed,
ComputationFailed,
GeneratorsError,
)
from sympy.utilities import group, sift, public
import sympy.polys
import mpmath
from mpmath.libmp.libhyper import NoConvergence
from sympy.polys.domains import FF, QQ, ZZ
from sympy.polys.constructor import construct_domain
from sympy.polys import polyoptions as options
from sympy.core.compatibility import iterable, range
@public
class Poly(Expr):
"""Generic class for representing polynomial expressions. """
__slots__ = ['rep', 'gens']
is_commutative = True
is_Poly = True
def __new__(cls, rep, *gens, **args):
"""Create a new polynomial instance out of something useful. """
opt = options.build_options(gens, args)
if 'order' in opt:
raise NotImplementedError("'order' keyword is not implemented yet")
if iterable(rep, exclude=str):
if isinstance(rep, dict):
return cls._from_dict(rep, opt)
else:
return cls._from_list(list(rep), opt)
else:
rep = sympify(rep)
if rep.is_Poly:
return cls._from_poly(rep, opt)
else:
return cls._from_expr(rep, opt)
@classmethod
def new(cls, rep, *gens):
"""Construct :class:`Poly` instance from raw representation. """
if not isinstance(rep, DMP):
raise PolynomialError(
"invalid polynomial representation: %s" % rep)
elif rep.lev != len(gens) - 1:
raise PolynomialError("invalid arguments: %s, %s" % (rep, gens))
obj = Basic.__new__(cls)
obj.rep = rep
obj.gens = gens
return obj
@classmethod
def from_dict(cls, rep, *gens, **args):
"""Construct a polynomial from a ``dict``. """
opt = options.build_options(gens, args)
return cls._from_dict(rep, opt)
@classmethod
def from_list(cls, rep, *gens, **args):
"""Construct a polynomial from a ``list``. """
opt = options.build_options(gens, args)
return cls._from_list(rep, opt)
@classmethod
def from_poly(cls, rep, *gens, **args):
"""Construct a polynomial from a polynomial. """
opt = options.build_options(gens, args)
return cls._from_poly(rep, opt)
@classmethod
def from_expr(cls, rep, *gens, **args):
"""Construct a polynomial from an expression. """
opt = options.build_options(gens, args)
return cls._from_expr(rep, opt)
@classmethod
def _from_dict(cls, rep, opt):
"""Construct a polynomial from a ``dict``. """
gens = opt.gens
if not gens:
raise GeneratorsNeeded(
"can't initialize from 'dict' without generators")
level = len(gens) - 1
domain = opt.domain
if domain is None:
domain, rep = construct_domain(rep, opt=opt)
else:
for monom, coeff in rep.items():
rep[monom] = domain.convert(coeff)
return cls.new(DMP.from_dict(rep, level, domain), *gens)
@classmethod
def _from_list(cls, rep, opt):
"""Construct a polynomial from a ``list``. """
gens = opt.gens
if not gens:
raise GeneratorsNeeded(
"can't initialize from 'list' without generators")
elif len(gens) != 1:
raise MultivariatePolynomialError(
"'list' representation not supported")
level = len(gens) - 1
domain = opt.domain
if domain is None:
domain, rep = construct_domain(rep, opt=opt)
else:
rep = list(map(domain.convert, rep))
return cls.new(DMP.from_list(rep, level, domain), *gens)
@classmethod
def _from_poly(cls, rep, opt):
"""Construct a polynomial from a polynomial. """
if cls != rep.__class__:
rep = cls.new(rep.rep, *rep.gens)
gens = opt.gens
field = opt.field
domain = opt.domain
if gens and rep.gens != gens:
if set(rep.gens) != set(gens):
return cls._from_expr(rep.as_expr(), opt)
else:
rep = rep.reorder(*gens)
if 'domain' in opt and domain:
rep = rep.set_domain(domain)
elif field is True:
rep = rep.to_field()
return rep
@classmethod
def _from_expr(cls, rep, opt):
"""Construct a polynomial from an expression. """
rep, opt = _dict_from_expr(rep, opt)
return cls._from_dict(rep, opt)
def _hashable_content(self):
"""Allow SymPy to hash Poly instances. """
return (self.rep, self.gens)
def __hash__(self):
return super(Poly, self).__hash__()
@property
def free_symbols(self):
"""
Free symbols of a polynomial expression.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1).free_symbols
set([x])
>>> Poly(x**2 + y).free_symbols
set([x, y])
>>> Poly(x**2 + y, x).free_symbols
set([x, y])
"""
symbols = set([])
for gen in self.gens:
symbols |= gen.free_symbols
return symbols | self.free_symbols_in_domain
@property
def free_symbols_in_domain(self):
"""
Free symbols of the domain of ``self``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1).free_symbols_in_domain
set()
>>> Poly(x**2 + y).free_symbols_in_domain
set()
>>> Poly(x**2 + y, x).free_symbols_in_domain
set([y])
"""
domain, symbols = self.rep.dom, set()
if domain.is_Composite:
for gen in domain.symbols:
symbols |= gen.free_symbols
elif domain.is_EX:
for coeff in self.coeffs():
symbols |= coeff.free_symbols
return symbols
@property
def args(self):
"""
        Don't mess with the core.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).args
(x**2 + 1,)
"""
return (self.as_expr(),)
@property
def gen(self):
"""
Return the principal generator.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).gen
x
"""
return self.gens[0]
@property
def domain(self):
"""Get the ground domain of ``self``. """
return self.get_domain()
@property
def zero(self):
"""Return zero polynomial with ``self``'s properties. """
return self.new(self.rep.zero(self.rep.lev, self.rep.dom), *self.gens)
@property
def one(self):
"""Return one polynomial with ``self``'s properties. """
return self.new(self.rep.one(self.rep.lev, self.rep.dom), *self.gens)
@property
def unit(self):
"""Return unit polynomial with ``self``'s properties. """
return self.new(self.rep.unit(self.rep.lev, self.rep.dom), *self.gens)
def unify(f, g):
"""
Make ``f`` and ``g`` belong to the same domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f, g = Poly(x/2 + 1), Poly(2*x + 1)
>>> f
Poly(1/2*x + 1, x, domain='QQ')
>>> g
Poly(2*x + 1, x, domain='ZZ')
>>> F, G = f.unify(g)
>>> F
Poly(1/2*x + 1, x, domain='QQ')
>>> G
Poly(2*x + 1, x, domain='QQ')
"""
_, per, F, G = f._unify(g)
return per(F), per(G)
def _unify(f, g):
g = sympify(g)
if not g.is_Poly:
try:
return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
except CoercionFailed:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if isinstance(f.rep, DMP) and isinstance(g.rep, DMP):
gens = _unify_gens(f.gens, g.gens)
dom, lev = f.rep.dom.unify(g.rep.dom, gens), len(gens) - 1
if f.gens != gens:
f_monoms, f_coeffs = _dict_reorder(
f.rep.to_dict(), f.gens, gens)
if f.rep.dom != dom:
f_coeffs = [dom.convert(c, f.rep.dom) for c in f_coeffs]
F = DMP(dict(list(zip(f_monoms, f_coeffs))), dom, lev)
else:
F = f.rep.convert(dom)
if g.gens != gens:
g_monoms, g_coeffs = _dict_reorder(
g.rep.to_dict(), g.gens, gens)
if g.rep.dom != dom:
g_coeffs = [dom.convert(c, g.rep.dom) for c in g_coeffs]
G = DMP(dict(list(zip(g_monoms, g_coeffs))), dom, lev)
else:
G = g.rep.convert(dom)
else:
raise UnificationFailed("can't unify %s with %s" % (f, g))
cls = f.__class__
def per(rep, dom=dom, gens=gens, remove=None):
if remove is not None:
gens = gens[:remove] + gens[remove + 1:]
if not gens:
return dom.to_sympy(rep)
return cls.new(rep, *gens)
return dom, per, F, G
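    # Note on _unify (descriptive comment, not from the original source): it returns a
    # 4-tuple ``(dom, per, F, G)`` where ``dom`` is the unified ground domain, ``per``
    # is a callback that rebuilds Poly instances over the unified generators (collapsing
    # to a plain domain element when no generators remain), and ``F``/``G`` are the two
    # low-level DMP representations converted to ``dom``. Most binary operations below
    # follow the pattern:
    #     _, per, F, G = f._unify(g)
    #     return per(F.<op>(G))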
def per(f, rep, gens=None, remove=None):
"""
Create a Poly out of the given representation.
Examples
========
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x, y
>>> from sympy.polys.polyclasses import DMP
>>> a = Poly(x**2 + 1)
>>> a.per(DMP([ZZ(1), ZZ(1)], ZZ), gens=[y])
Poly(y + 1, y, domain='ZZ')
"""
if gens is None:
gens = f.gens
if remove is not None:
gens = gens[:remove] + gens[remove + 1:]
if not gens:
return f.rep.dom.to_sympy(rep)
return f.__class__.new(rep, *gens)
def set_domain(f, domain):
"""Set the ground domain of ``f``. """
opt = options.build_options(f.gens, {'domain': domain})
return f.per(f.rep.convert(opt.domain))
def get_domain(f):
"""Get the ground domain of ``f``. """
return f.rep.dom
def set_modulus(f, modulus):
"""
Set the modulus of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(5*x**2 + 2*x - 1, x).set_modulus(2)
Poly(x**2 + 1, x, modulus=2)
"""
modulus = options.Modulus.preprocess(modulus)
return f.set_domain(FF(modulus))
def get_modulus(f):
"""
Get the modulus of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, modulus=2).get_modulus()
2
"""
domain = f.get_domain()
if domain.is_FiniteField:
return Integer(domain.characteristic())
else:
raise PolynomialError("not a polynomial over a Galois field")
def _eval_subs(f, old, new):
"""Internal implementation of :func:`subs`. """
if old in f.gens:
if new.is_number:
return f.eval(old, new)
else:
try:
return f.replace(old, new)
except PolynomialError:
pass
return f.as_expr().subs(old, new)
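    # Descriptive note (added): substitution of a generator by a number is delegated to
    # ``eval``; substitution by another symbol goes through ``replace`` when possible,
    # and anything else falls back to substituting in the ``Expr`` form of the polynomial.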
def exclude(f):
"""
Remove unnecessary generators from ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import a, b, c, d, x
>>> Poly(a + x, a, b, c, d, x).exclude()
Poly(a + x, a, x, domain='ZZ')
"""
J, new = f.rep.exclude()
gens = []
for j in range(len(f.gens)):
if j not in J:
gens.append(f.gens[j])
return f.per(new, gens=gens)
def replace(f, x, y=None):
"""
Replace ``x`` with ``y`` in generators list.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1, x).replace(x, y)
Poly(y**2 + 1, y, domain='ZZ')
"""
if y is None:
if f.is_univariate:
x, y = f.gen, x
else:
raise PolynomialError(
"syntax supported only in univariate case")
if x == y:
return f
if x in f.gens and y not in f.gens:
dom = f.get_domain()
if not dom.is_Composite or y not in dom.symbols:
gens = list(f.gens)
gens[gens.index(x)] = y
return f.per(f.rep, gens=gens)
raise PolynomialError("can't replace %s with %s in %s" % (x, y, f))
def reorder(f, *gens, **args):
"""
Efficiently apply new order of generators.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x*y**2, x, y).reorder(y, x)
Poly(y**2*x + x**2, y, x, domain='ZZ')
"""
opt = options.Options((), args)
if not gens:
gens = _sort_gens(f.gens, opt=opt)
elif set(f.gens) != set(gens):
raise PolynomialError(
"generators list can differ only up to order of elements")
rep = dict(list(zip(*_dict_reorder(f.rep.to_dict(), f.gens, gens))))
return f.per(DMP(rep, f.rep.dom, len(gens) - 1), gens=gens)
def ltrim(f, gen):
"""
Remove dummy generators from the "left" of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(y**2 + y*z**2, x, y, z).ltrim(y)
Poly(y**2 + y*z**2, y, z, domain='ZZ')
"""
rep = f.as_dict(native=True)
j = f._gen_to_level(gen)
terms = {}
for monom, coeff in rep.items():
monom = monom[j:]
if monom not in terms:
terms[monom] = coeff
else:
raise PolynomialError("can't left trim %s" % f)
gens = f.gens[j:]
return f.new(DMP.from_dict(terms, len(gens) - 1, f.rep.dom), *gens)
def has_only_gens(f, *gens):
"""
Return ``True`` if ``Poly(f, *gens)`` retains ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(x*y + 1, x, y, z).has_only_gens(x, y)
True
>>> Poly(x*y + z, x, y, z).has_only_gens(x, y)
False
"""
indices = set([])
for gen in gens:
try:
index = f.gens.index(gen)
except ValueError:
raise GeneratorsError(
"%s doesn't have %s as generator" % (f, gen))
else:
indices.add(index)
for monom in f.monoms():
for i, elt in enumerate(monom):
if i not in indices and elt:
return False
return True
def to_ring(f):
"""
Make the ground domain a ring.
Examples
========
>>> from sympy import Poly, QQ
>>> from sympy.abc import x
>>> Poly(x**2 + 1, domain=QQ).to_ring()
Poly(x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'to_ring'):
result = f.rep.to_ring()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_ring')
return f.per(result)
def to_field(f):
"""
Make the ground domain a field.
Examples
========
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x, domain=ZZ).to_field()
Poly(x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'to_field'):
result = f.rep.to_field()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_field')
return f.per(result)
def to_exact(f):
"""
Make the ground domain exact.
Examples
========
>>> from sympy import Poly, RR
>>> from sympy.abc import x
>>> Poly(x**2 + 1.0, x, domain=RR).to_exact()
Poly(x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'to_exact'):
result = f.rep.to_exact()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_exact')
return f.per(result)
def retract(f, field=None):
"""
Recalculate the ground domain of a polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**2 + 1, x, domain='QQ[y]')
>>> f
Poly(x**2 + 1, x, domain='QQ[y]')
>>> f.retract()
Poly(x**2 + 1, x, domain='ZZ')
>>> f.retract(field=True)
Poly(x**2 + 1, x, domain='QQ')
"""
dom, rep = construct_domain(f.as_dict(zero=True),
field=field, composite=f.domain.is_Composite or None)
return f.from_dict(rep, f.gens, domain=dom)
def slice(f, x, m, n=None):
"""Take a continuous subsequence of terms of ``f``. """
if n is None:
j, m, n = 0, x, m
else:
j = f._gen_to_level(x)
m, n = int(m), int(n)
if hasattr(f.rep, 'slice'):
result = f.rep.slice(m, n, j)
else: # pragma: no cover
raise OperationNotSupported(f, 'slice')
return f.per(result)
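    # Illustrative sketch (added, not a doctest from the original source): ``slice``
    # keeps the terms whose exponent in the selected generator lies in the half-open
    # range [m, n), so, assuming the usual behaviour,
    #     Poly(x**3 + 2*x**2 + 3*x + 4, x).slice(0, 3)
    # would keep the terms of degree 0, 1 and 2, i.e. Poly(2*x**2 + 3*x + 4, x, domain='ZZ').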
def coeffs(f, order=None):
"""
Returns all non-zero coefficients from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x + 3, x).coeffs()
[1, 2, 3]
See Also
========
all_coeffs
coeff_monomial
nth
"""
return [f.rep.dom.to_sympy(c) for c in f.rep.coeffs(order=order)]
def monoms(f, order=None):
"""
Returns all non-zero monomials from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).monoms()
[(2, 0), (1, 2), (1, 1), (0, 1)]
See Also
========
all_monoms
"""
return f.rep.monoms(order=order)
def terms(f, order=None):
"""
Returns all non-zero terms from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).terms()
[((2, 0), 1), ((1, 2), 2), ((1, 1), 1), ((0, 1), 3)]
See Also
========
all_terms
"""
return [(m, f.rep.dom.to_sympy(c)) for m, c in f.rep.terms(order=order)]
def all_coeffs(f):
"""
Returns all coefficients from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_coeffs()
[1, 0, 2, -1]
"""
return [f.rep.dom.to_sympy(c) for c in f.rep.all_coeffs()]
def all_monoms(f):
"""
Returns all monomials from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_monoms()
[(3,), (2,), (1,), (0,)]
See Also
========
all_terms
"""
return f.rep.all_monoms()
def all_terms(f):
"""
Returns all terms from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_terms()
[((3,), 1), ((2,), 0), ((1,), 2), ((0,), -1)]
"""
return [(m, f.rep.dom.to_sympy(c)) for m, c in f.rep.all_terms()]
def termwise(f, func, *gens, **args):
"""
Apply a function to all terms of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> def func(k, coeff):
... k = k[0]
... return coeff//10**(2-k)
>>> Poly(x**2 + 20*x + 400).termwise(func)
Poly(x**2 + 2*x + 4, x, domain='ZZ')
"""
terms = {}
for monom, coeff in f.terms():
result = func(monom, coeff)
if isinstance(result, tuple):
monom, coeff = result
else:
coeff = result
if coeff:
if monom not in terms:
terms[monom] = coeff
else:
raise PolynomialError(
"%s monomial was generated twice" % monom)
return f.from_dict(terms, *(gens or f.gens), **args)
def length(f):
"""
Returns the number of non-zero terms in ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 2*x - 1).length()
3
"""
return len(f.as_dict())
def as_dict(f, native=False, zero=False):
"""
Switch to a ``dict`` representation.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 - y, x, y).as_dict()
{(0, 1): -1, (1, 2): 2, (2, 0): 1}
"""
if native:
return f.rep.to_dict(zero=zero)
else:
return f.rep.to_sympy_dict(zero=zero)
def as_list(f, native=False):
"""Switch to a ``list`` representation. """
if native:
return f.rep.to_list()
else:
return f.rep.to_sympy_list()
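    # Illustrative sketch (added): for a univariate polynomial the list is expected to
    # hold the coefficients from the leading term down to the constant term, so
    # Poly(x**2 + 2*x + 3, x).as_list() should give [1, 2, 3]; for multivariate
    # polynomials the representation is nested accordingly.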
def as_expr(f, *gens):
"""
Convert a Poly instance to an Expr instance.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2 + 2*x*y**2 - y, x, y)
>>> f.as_expr()
x**2 + 2*x*y**2 - y
>>> f.as_expr({x: 5})
10*y**2 - y + 25
>>> f.as_expr(5, 6)
379
"""
if not gens:
gens = f.gens
elif len(gens) == 1 and isinstance(gens[0], dict):
mapping = gens[0]
gens = list(f.gens)
for gen, value in mapping.items():
try:
index = gens.index(gen)
except ValueError:
raise GeneratorsError(
"%s doesn't have %s as generator" % (f, gen))
else:
gens[index] = value
return basic_from_dict(f.rep.to_sympy_dict(), *gens)
def lift(f):
"""
Convert algebraic coefficients to rationals.
Examples
========
>>> from sympy import Poly, I
>>> from sympy.abc import x
>>> Poly(x**2 + I*x + 1, x, extension=I).lift()
Poly(x**4 + 3*x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'lift'):
result = f.rep.lift()
else: # pragma: no cover
raise OperationNotSupported(f, 'lift')
return f.per(result)
def deflate(f):
"""
Reduce degree of ``f`` by mapping ``x_i**m`` to ``y_i``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**6*y**2 + x**3 + 1, x, y).deflate()
((3, 2), Poly(x**2*y + x + 1, x, y, domain='ZZ'))
"""
if hasattr(f.rep, 'deflate'):
J, result = f.rep.deflate()
else: # pragma: no cover
raise OperationNotSupported(f, 'deflate')
return J, f.per(result)
def inject(f, front=False):
"""
Inject ground domain generators into ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2*y + x*y**3 + x*y + 1, x)
>>> f.inject()
Poly(x**2*y + x*y**3 + x*y + 1, x, y, domain='ZZ')
>>> f.inject(front=True)
Poly(y**3*x + y*x**2 + y*x + 1, y, x, domain='ZZ')
"""
dom = f.rep.dom
if dom.is_Numerical:
return f
elif not dom.is_Poly:
raise DomainError("can't inject generators over %s" % dom)
if hasattr(f.rep, 'inject'):
result = f.rep.inject(front=front)
else: # pragma: no cover
raise OperationNotSupported(f, 'inject')
if front:
gens = dom.symbols + f.gens
else:
gens = f.gens + dom.symbols
return f.new(result, *gens)
def eject(f, *gens):
"""
Eject selected generators into the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2*y + x*y**3 + x*y + 1, x, y)
>>> f.eject(x)
Poly(x*y**3 + (x**2 + x)*y + 1, y, domain='ZZ[x]')
>>> f.eject(y)
Poly(y*x**2 + (y**3 + y)*x + 1, x, domain='ZZ[y]')
"""
dom = f.rep.dom
if not dom.is_Numerical:
raise DomainError("can't eject generators over %s" % dom)
n, k = len(f.gens), len(gens)
if f.gens[:k] == gens:
_gens, front = f.gens[k:], True
elif f.gens[-k:] == gens:
_gens, front = f.gens[:-k], False
else:
raise NotImplementedError(
"can only eject front or back generators")
dom = dom.inject(*gens)
if hasattr(f.rep, 'eject'):
result = f.rep.eject(dom, front=front)
else: # pragma: no cover
raise OperationNotSupported(f, 'eject')
return f.new(result, *_gens)
def terms_gcd(f):
"""
Remove GCD of terms from the polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**6*y**2 + x**3*y, x, y).terms_gcd()
((3, 1), Poly(x**3*y + 1, x, y, domain='ZZ'))
"""
if hasattr(f.rep, 'terms_gcd'):
J, result = f.rep.terms_gcd()
else: # pragma: no cover
raise OperationNotSupported(f, 'terms_gcd')
return J, f.per(result)
def add_ground(f, coeff):
"""
Add an element of the ground domain to ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).add_ground(2)
Poly(x + 3, x, domain='ZZ')
"""
if hasattr(f.rep, 'add_ground'):
result = f.rep.add_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'add_ground')
return f.per(result)
def sub_ground(f, coeff):
"""
Subtract an element of the ground domain from ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).sub_ground(2)
Poly(x - 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'sub_ground'):
result = f.rep.sub_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'sub_ground')
return f.per(result)
def mul_ground(f, coeff):
"""
        Multiply ``f`` by an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).mul_ground(2)
Poly(2*x + 2, x, domain='ZZ')
"""
if hasattr(f.rep, 'mul_ground'):
result = f.rep.mul_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'mul_ground')
return f.per(result)
def quo_ground(f, coeff):
"""
        Quotient of ``f`` by an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x + 4).quo_ground(2)
Poly(x + 2, x, domain='ZZ')
>>> Poly(2*x + 3).quo_ground(2)
Poly(x + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'quo_ground'):
result = f.rep.quo_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'quo_ground')
return f.per(result)
def exquo_ground(f, coeff):
"""
        Exact quotient of ``f`` by an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x + 4).exquo_ground(2)
Poly(x + 2, x, domain='ZZ')
>>> Poly(2*x + 3).exquo_ground(2)
Traceback (most recent call last):
...
ExactQuotientFailed: 2 does not divide 3 in ZZ
"""
if hasattr(f.rep, 'exquo_ground'):
result = f.rep.exquo_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'exquo_ground')
return f.per(result)
def abs(f):
"""
Make all coefficients in ``f`` positive.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).abs()
Poly(x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'abs'):
result = f.rep.abs()
else: # pragma: no cover
raise OperationNotSupported(f, 'abs')
return f.per(result)
def neg(f):
"""
Negate all coefficients in ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).neg()
Poly(-x**2 + 1, x, domain='ZZ')
>>> -Poly(x**2 - 1, x)
Poly(-x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'neg'):
result = f.rep.neg()
else: # pragma: no cover
raise OperationNotSupported(f, 'neg')
return f.per(result)
def add(f, g):
"""
Add two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).add(Poly(x - 2, x))
Poly(x**2 + x - 1, x, domain='ZZ')
>>> Poly(x**2 + 1, x) + Poly(x - 2, x)
Poly(x**2 + x - 1, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.add_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'add'):
result = F.add(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'add')
return per(result)
def sub(f, g):
"""
Subtract two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).sub(Poly(x - 2, x))
Poly(x**2 - x + 3, x, domain='ZZ')
>>> Poly(x**2 + 1, x) - Poly(x - 2, x)
Poly(x**2 - x + 3, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.sub_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'sub'):
result = F.sub(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'sub')
return per(result)
def mul(f, g):
"""
Multiply two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).mul(Poly(x - 2, x))
Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ')
>>> Poly(x**2 + 1, x)*Poly(x - 2, x)
Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.mul_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'mul'):
result = F.mul(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'mul')
return per(result)
def sqr(f):
"""
Square a polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x - 2, x).sqr()
Poly(x**2 - 4*x + 4, x, domain='ZZ')
>>> Poly(x - 2, x)**2
Poly(x**2 - 4*x + 4, x, domain='ZZ')
"""
if hasattr(f.rep, 'sqr'):
result = f.rep.sqr()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqr')
return f.per(result)
def pow(f, n):
"""
Raise ``f`` to a non-negative power ``n``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x - 2, x).pow(3)
Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ')
>>> Poly(x - 2, x)**3
Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ')
"""
n = int(n)
if hasattr(f.rep, 'pow'):
result = f.rep.pow(n)
else: # pragma: no cover
raise OperationNotSupported(f, 'pow')
return f.per(result)
def pdiv(f, g):
"""
Polynomial pseudo-division of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).pdiv(Poly(2*x - 4, x))
(Poly(2*x + 4, x, domain='ZZ'), Poly(20, x, domain='ZZ'))
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pdiv'):
q, r = F.pdiv(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'pdiv')
return per(q), per(r)
def prem(f, g):
"""
Polynomial pseudo-remainder of ``f`` by ``g``.
Caveat: The function prem(f, g, x) can be safely used to compute
in Z[x] _only_ subresultant polynomial remainder sequences (prs's).
To safely compute Euclidean and Sturmian prs's in Z[x]
        employ any one of the corresponding functions found in
the module sympy.polys.subresultants_qq_zz. The functions
in the module with suffix _pg compute prs's in Z[x] employing
rem(f, g, x), whereas the functions with suffix _amv
compute prs's in Z[x] employing rem_z(f, g, x).
The function rem_z(f, g, x) differs from prem(f, g, x) in that
to compute the remainder polynomials in Z[x] it premultiplies
        the dividend by the absolute value of the leading coefficient
of the divisor raised to the power degree(f, x) - degree(g, x) + 1.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).prem(Poly(2*x - 4, x))
Poly(20, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'prem'):
result = F.prem(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'prem')
return per(result)
def pquo(f, g):
"""
Polynomial pseudo-quotient of ``f`` by ``g``.
See the Caveat note in the function prem(f, g).
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).pquo(Poly(2*x - 4, x))
Poly(2*x + 4, x, domain='ZZ')
>>> Poly(x**2 - 1, x).pquo(Poly(2*x - 2, x))
Poly(2*x + 2, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pquo'):
result = F.pquo(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'pquo')
return per(result)
def pexquo(f, g):
"""
Polynomial exact pseudo-quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).pexquo(Poly(2*x - 2, x))
Poly(2*x + 2, x, domain='ZZ')
>>> Poly(x**2 + 1, x).pexquo(Poly(2*x - 4, x))
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pexquo'):
try:
result = F.pexquo(G)
except ExactQuotientFailed as exc:
raise exc.new(f.as_expr(), g.as_expr())
else: # pragma: no cover
raise OperationNotSupported(f, 'pexquo')
return per(result)
def div(f, g, auto=True):
"""
Polynomial division with remainder of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x))
(Poly(1/2*x + 1, x, domain='QQ'), Poly(5, x, domain='QQ'))
>>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x), auto=False)
(Poly(0, x, domain='ZZ'), Poly(x**2 + 1, x, domain='ZZ'))
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'div'):
q, r = F.div(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'div')
if retract:
try:
Q, R = q.to_ring(), r.to_ring()
except CoercionFailed:
pass
else:
q, r = Q, R
return per(q), per(r)
def rem(f, g, auto=True):
"""
Computes the polynomial remainder of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x))
Poly(5, x, domain='ZZ')
>>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x), auto=False)
Poly(x**2 + 1, x, domain='ZZ')
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'rem'):
r = F.rem(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'rem')
if retract:
try:
r = r.to_ring()
except CoercionFailed:
pass
return per(r)
def quo(f, g, auto=True):
"""
Computes polynomial quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).quo(Poly(2*x - 4, x))
Poly(1/2*x + 1, x, domain='QQ')
>>> Poly(x**2 - 1, x).quo(Poly(x - 1, x))
Poly(x + 1, x, domain='ZZ')
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'quo'):
q = F.quo(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'quo')
if retract:
try:
q = q.to_ring()
except CoercionFailed:
pass
return per(q)
def exquo(f, g, auto=True):
"""
Computes polynomial exact quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).exquo(Poly(x - 1, x))
Poly(x + 1, x, domain='ZZ')
>>> Poly(x**2 + 1, x).exquo(Poly(2*x - 4, x))
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'exquo'):
try:
q = F.exquo(G)
except ExactQuotientFailed as exc:
raise exc.new(f.as_expr(), g.as_expr())
else: # pragma: no cover
raise OperationNotSupported(f, 'exquo')
if retract:
try:
q = q.to_ring()
except CoercionFailed:
pass
return per(q)
def _gen_to_level(f, gen):
"""Returns level associated with the given generator. """
if isinstance(gen, int):
length = len(f.gens)
if -length <= gen < length:
if gen < 0:
return length + gen
else:
return gen
else:
raise PolynomialError("-%s <= gen < %s expected, got %s" %
(length, length, gen))
else:
try:
return f.gens.index(sympify(gen))
except ValueError:
raise PolynomialError(
"a valid generator expected, got %s" % gen)
def degree(f, gen=0):
"""
Returns degree of ``f`` in ``x_j``.
The degree of 0 is negative infinity.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).degree()
2
>>> Poly(x**2 + y*x + y, x, y).degree(y)
1
>>> Poly(0, x).degree()
-oo
"""
j = f._gen_to_level(gen)
if hasattr(f.rep, 'degree'):
return f.rep.degree(j)
else: # pragma: no cover
raise OperationNotSupported(f, 'degree')
def degree_list(f):
"""
Returns a list of degrees of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).degree_list()
(2, 1)
"""
if hasattr(f.rep, 'degree_list'):
return f.rep.degree_list()
else: # pragma: no cover
raise OperationNotSupported(f, 'degree_list')
def total_degree(f):
"""
Returns the total degree of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).total_degree()
2
>>> Poly(x + y**5, x, y).total_degree()
5
"""
if hasattr(f.rep, 'total_degree'):
return f.rep.total_degree()
else: # pragma: no cover
raise OperationNotSupported(f, 'total_degree')
def homogenize(f, s):
"""
Returns the homogeneous polynomial of ``f``.
        A homogeneous polynomial is a polynomial whose monomials with
        non-zero coefficients all have the same total degree. If you only
want to check if a polynomial is homogeneous, then use
:func:`Poly.is_homogeneous`. If you want not only to check if a
polynomial is homogeneous but also compute its homogeneous order,
then use :func:`Poly.homogeneous_order`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> f = Poly(x**5 + 2*x**2*y**2 + 9*x*y**3)
>>> f.homogenize(z)
Poly(x**5 + 2*x**2*y**2*z + 9*x*y**3*z, x, y, z, domain='ZZ')
"""
if not isinstance(s, Symbol):
raise TypeError("``Symbol`` expected, got %s" % type(s))
if s in f.gens:
i = f.gens.index(s)
gens = f.gens
else:
i = len(f.gens)
gens = f.gens + (s,)
if hasattr(f.rep, 'homogenize'):
return f.per(f.rep.homogenize(i), gens=gens)
        raise OperationNotSupported(f, 'homogenize')
def homogeneous_order(f):
"""
Returns the homogeneous order of ``f``.
        A homogeneous polynomial is a polynomial whose monomials with
        non-zero coefficients all have the same total degree. This degree is
the homogeneous order of ``f``. If you only want to check if a
polynomial is homogeneous, then use :func:`Poly.is_homogeneous`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**5 + 2*x**3*y**2 + 9*x*y**4)
>>> f.homogeneous_order()
5
"""
if hasattr(f.rep, 'homogeneous_order'):
return f.rep.homogeneous_order()
else: # pragma: no cover
raise OperationNotSupported(f, 'homogeneous_order')
def LC(f, order=None):
"""
Returns the leading coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(4*x**3 + 2*x**2 + 3*x, x).LC()
4
"""
if order is not None:
return f.coeffs(order)[0]
if hasattr(f.rep, 'LC'):
result = f.rep.LC()
else: # pragma: no cover
raise OperationNotSupported(f, 'LC')
return f.rep.dom.to_sympy(result)
def TC(f):
"""
Returns the trailing coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x**2 + 3*x, x).TC()
0
"""
if hasattr(f.rep, 'TC'):
result = f.rep.TC()
else: # pragma: no cover
raise OperationNotSupported(f, 'TC')
return f.rep.dom.to_sympy(result)
def EC(f, order=None):
"""
Returns the last non-zero coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x**2 + 3*x, x).EC()
3
"""
if hasattr(f.rep, 'coeffs'):
return f.coeffs(order)[-1]
else: # pragma: no cover
raise OperationNotSupported(f, 'EC')
def coeff_monomial(f, monom):
"""
Returns the coefficient of ``monom`` in ``f`` if there, else None.
Examples
========
>>> from sympy import Poly, exp
>>> from sympy.abc import x, y
>>> p = Poly(24*x*y*exp(8) + 23*x, x, y)
>>> p.coeff_monomial(x)
23
>>> p.coeff_monomial(y)
0
>>> p.coeff_monomial(x*y)
24*exp(8)
Note that ``Expr.coeff()`` behaves differently, collecting terms
if possible; the Poly must be converted to an Expr to use that
method, however:
>>> p.as_expr().coeff(x)
24*y*exp(8) + 23
>>> p.as_expr().coeff(y)
24*x*exp(8)
>>> p.as_expr().coeff(x*y)
24*exp(8)
See Also
========
nth: more efficient query using exponents of the monomial's generators
"""
return f.nth(*Monomial(monom, f.gens).exponents)
def nth(f, *N):
"""
Returns the ``n``-th coefficient of ``f`` where ``N`` are the
exponents of the generators in the term of interest.
Examples
========
>>> from sympy import Poly, sqrt
>>> from sympy.abc import x, y
>>> Poly(x**3 + 2*x**2 + 3*x, x).nth(2)
2
>>> Poly(x**3 + 2*x*y**2 + y**2, x, y).nth(1, 2)
2
>>> Poly(4*sqrt(x)*y)
Poly(4*y*sqrt(x), y, sqrt(x), domain='ZZ')
>>> _.nth(1, 1)
4
See Also
========
coeff_monomial
"""
if hasattr(f.rep, 'nth'):
if len(N) != len(f.gens):
raise ValueError('exponent of each generator must be specified')
result = f.rep.nth(*list(map(int, N)))
else: # pragma: no cover
raise OperationNotSupported(f, 'nth')
return f.rep.dom.to_sympy(result)
def coeff(f, x, n=1, right=False):
# the semantics of coeff_monomial and Expr.coeff are different;
# if someone is working with a Poly, they should be aware of the
# differences and chose the method best suited for the query.
# Alternatively, a pure-polys method could be written here but
# at this time the ``right`` keyword would be ignored because Poly
# doesn't work with non-commutatives.
raise NotImplementedError(
'Either convert to Expr with `as_expr` method '
'to use Expr\'s coeff method or else use the '
'`coeff_monomial` method of Polys.')
def LM(f, order=None):
"""
Returns the leading monomial of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LM()
x**2*y**0
"""
return Monomial(f.monoms(order)[0], f.gens)
def EM(f, order=None):
"""
Returns the last non-zero monomial of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).EM()
x**0*y**1
"""
return Monomial(f.monoms(order)[-1], f.gens)
def LT(f, order=None):
"""
Returns the leading term of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LT()
(x**2*y**0, 4)
"""
monom, coeff = f.terms(order)[0]
return Monomial(monom, f.gens), coeff
def ET(f, order=None):
"""
Returns the last non-zero term of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).ET()
(x**0*y**1, 3)
"""
monom, coeff = f.terms(order)[-1]
return Monomial(monom, f.gens), coeff
def max_norm(f):
"""
Returns maximum norm of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(-x**2 + 2*x - 3, x).max_norm()
3
"""
if hasattr(f.rep, 'max_norm'):
result = f.rep.max_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'max_norm')
return f.rep.dom.to_sympy(result)
def l1_norm(f):
"""
Returns l1 norm of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(-x**2 + 2*x - 3, x).l1_norm()
6
"""
if hasattr(f.rep, 'l1_norm'):
result = f.rep.l1_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'l1_norm')
return f.rep.dom.to_sympy(result)
def clear_denoms(self, convert=False):
"""
Clear denominators, but keep the ground domain.
Examples
========
>>> from sympy import Poly, S, QQ
>>> from sympy.abc import x
>>> f = Poly(x/2 + S(1)/3, x, domain=QQ)
>>> f.clear_denoms()
(6, Poly(3*x + 2, x, domain='QQ'))
>>> f.clear_denoms(convert=True)
(6, Poly(3*x + 2, x, domain='ZZ'))
"""
f = self
if not f.rep.dom.has_Field:
return S.One, f
dom = f.get_domain()
if dom.has_assoc_Ring:
dom = f.rep.dom.get_ring()
if hasattr(f.rep, 'clear_denoms'):
coeff, result = f.rep.clear_denoms()
else: # pragma: no cover
raise OperationNotSupported(f, 'clear_denoms')
coeff, f = dom.to_sympy(coeff), f.per(result)
if not convert or not dom.has_assoc_Ring:
return coeff, f
else:
return coeff, f.to_ring()
def rat_clear_denoms(self, g):
"""
Clear denominators in a rational function ``f/g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2/y + 1, x)
>>> g = Poly(x**3 + y, x)
>>> p, q = f.rat_clear_denoms(g)
>>> p
Poly(x**2 + y, x, domain='ZZ[y]')
>>> q
Poly(y*x**3 + y**2, x, domain='ZZ[y]')
"""
f = self
dom, per, f, g = f._unify(g)
f = per(f)
g = per(g)
if not (dom.has_Field and dom.has_assoc_Ring):
return f, g
a, f = f.clear_denoms(convert=True)
b, g = g.clear_denoms(convert=True)
f = f.mul_ground(b)
g = g.mul_ground(a)
return f, g
def integrate(self, *specs, **args):
"""
Computes indefinite integral of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x + 1, x).integrate()
Poly(1/3*x**3 + x**2 + x, x, domain='QQ')
>>> Poly(x*y**2 + x, x, y).integrate((0, 1), (1, 0))
Poly(1/2*x**2*y**2 + 1/2*x**2, x, y, domain='QQ')
"""
f = self
if args.get('auto', True) and f.rep.dom.has_Ring:
f = f.to_field()
if hasattr(f.rep, 'integrate'):
if not specs:
return f.per(f.rep.integrate(m=1))
rep = f.rep
for spec in specs:
if type(spec) is tuple:
gen, m = spec
else:
gen, m = spec, 1
rep = rep.integrate(int(m), f._gen_to_level(gen))
return f.per(rep)
else: # pragma: no cover
raise OperationNotSupported(f, 'integrate')
def diff(f, *specs, **kwargs):
"""
Computes partial derivative of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x + 1, x).diff()
Poly(2*x + 2, x, domain='ZZ')
>>> Poly(x*y**2 + x, x, y).diff((0, 0), (1, 1))
Poly(2*x*y, x, y, domain='ZZ')
"""
if not kwargs.get('evaluate', True):
return Derivative(f, *specs, **kwargs)
if hasattr(f.rep, 'diff'):
if not specs:
return f.per(f.rep.diff(m=1))
rep = f.rep
for spec in specs:
if type(spec) is tuple:
gen, m = spec
else:
gen, m = spec, 1
rep = rep.diff(int(m), f._gen_to_level(gen))
return f.per(rep)
else: # pragma: no cover
raise OperationNotSupported(f, 'diff')
_eval_derivative = diff
_eval_diff = diff
def eval(self, x, a=None, auto=True):
"""
Evaluate ``f`` at ``a`` in the given variable.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(x**2 + 2*x + 3, x).eval(2)
11
>>> Poly(2*x*y + 3*x + y + 2, x, y).eval(x, 2)
Poly(5*y + 8, y, domain='ZZ')
>>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z)
>>> f.eval({x: 2})
Poly(5*y + 2*z + 6, y, z, domain='ZZ')
>>> f.eval({x: 2, y: 5})
Poly(2*z + 31, z, domain='ZZ')
>>> f.eval({x: 2, y: 5, z: 7})
45
>>> f.eval((2, 5))
Poly(2*z + 31, z, domain='ZZ')
>>> f(2, 5)
Poly(2*z + 31, z, domain='ZZ')
"""
f = self
if a is None:
if isinstance(x, dict):
mapping = x
for gen, value in mapping.items():
f = f.eval(gen, value)
return f
elif isinstance(x, (tuple, list)):
values = x
if len(values) > len(f.gens):
raise ValueError("too many values provided")
for gen, value in zip(f.gens, values):
f = f.eval(gen, value)
return f
else:
j, a = 0, x
else:
j = f._gen_to_level(x)
if not hasattr(f.rep, 'eval'): # pragma: no cover
raise OperationNotSupported(f, 'eval')
try:
result = f.rep.eval(a, j)
except CoercionFailed:
if not auto:
raise DomainError("can't evaluate at %s in %s" % (a, f.rep.dom))
else:
a_domain, [a] = construct_domain([a])
new_domain = f.get_domain().unify_with_symbols(a_domain, f.gens)
f = f.set_domain(new_domain)
a = new_domain.convert(a, a_domain)
result = f.rep.eval(a, j)
return f.per(result, remove=j)
def __call__(f, *values):
"""
        Evaluate ``f`` at the given values.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z)
>>> f(2)
Poly(5*y + 2*z + 6, y, z, domain='ZZ')
>>> f(2, 5)
Poly(2*z + 31, z, domain='ZZ')
>>> f(2, 5, 7)
45
"""
return f.eval(values)
def half_gcdex(f, g, auto=True):
"""
Half extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> Poly(f).half_gcdex(Poly(g))
(Poly(-1/5*x + 3/5, x, domain='QQ'), Poly(x + 1, x, domain='QQ'))
"""
dom, per, F, G = f._unify(g)
if auto and dom.has_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'half_gcdex'):
s, h = F.half_gcdex(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'half_gcdex')
return per(s), per(h)
def gcdex(f, g, auto=True):
"""
Extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> Poly(f).gcdex(Poly(g))
(Poly(-1/5*x + 3/5, x, domain='QQ'),
Poly(1/5*x**2 - 6/5*x + 2, x, domain='QQ'),
Poly(x + 1, x, domain='QQ'))
"""
dom, per, F, G = f._unify(g)
if auto and dom.has_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'gcdex'):
s, t, h = F.gcdex(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'gcdex')
return per(s), per(t), per(h)
def invert(f, g, auto=True):
"""
Invert ``f`` modulo ``g`` when possible.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).invert(Poly(2*x - 1, x))
Poly(-4/3, x, domain='QQ')
>>> Poly(x**2 - 1, x).invert(Poly(x - 1, x))
Traceback (most recent call last):
...
NotInvertible: zero divisor
"""
dom, per, F, G = f._unify(g)
if auto and dom.has_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'invert'):
result = F.invert(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'invert')
return per(result)
def revert(f, n):
"""Compute ``f**(-1)`` mod ``x**n``. """
if hasattr(f.rep, 'revert'):
result = f.rep.revert(int(n))
else: # pragma: no cover
raise OperationNotSupported(f, 'revert')
return f.per(result)
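    # Illustrative sketch (added, not a verified doctest): ``revert`` computes the
    # truncated power-series inverse, so for f = 1 - x one would expect f.revert(4)
    # to agree with 1 + x + x**2 + x**3 (the geometric series cut off after the x**3
    # term), provided the trailing coefficient of ``f`` is a unit.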
def subresultants(f, g):
"""
Computes the subresultant PRS of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).subresultants(Poly(x**2 - 1, x))
[Poly(x**2 + 1, x, domain='ZZ'),
Poly(x**2 - 1, x, domain='ZZ'),
Poly(-2, x, domain='ZZ')]
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'subresultants'):
result = F.subresultants(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'subresultants')
return list(map(per, result))
def resultant(f, g, includePRS=False):
"""
Computes the resultant of ``f`` and ``g`` via PRS.
If includePRS=True, it includes the subresultant PRS in the result.
Because the PRS is used to calculate the resultant, this is more
efficient than calling :func:`subresultants` separately.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**2 + 1, x)
>>> f.resultant(Poly(x**2 - 1, x))
4
>>> f.resultant(Poly(x**2 - 1, x), includePRS=True)
(4, [Poly(x**2 + 1, x, domain='ZZ'), Poly(x**2 - 1, x, domain='ZZ'),
Poly(-2, x, domain='ZZ')])
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'resultant'):
if includePRS:
result, R = F.resultant(G, includePRS=includePRS)
else:
result = F.resultant(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'resultant')
if includePRS:
return (per(result, remove=0), list(map(per, R)))
return per(result, remove=0)
def discriminant(f):
"""
Computes the discriminant of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 2*x + 3, x).discriminant()
-8
"""
if hasattr(f.rep, 'discriminant'):
result = f.rep.discriminant()
else: # pragma: no cover
raise OperationNotSupported(f, 'discriminant')
return f.per(result, remove=0)
def dispersionset(f, g=None):
r"""Compute the *dispersion set* of two polynomials.
For two polynomials `f(x)` and `g(x)` with `\deg f > 0`
and `\deg g > 0` the dispersion set `\operatorname{J}(f, g)` is defined as:
.. math::
\operatorname{J}(f, g)
& := \{a \in \mathbb{N}_0 | \gcd(f(x), g(x+a)) \neq 1\} \\
& = \{a \in \mathbb{N}_0 | \deg \gcd(f(x), g(x+a)) \geq 1\}
For a single polynomial one defines `\operatorname{J}(f) := \operatorname{J}(f, f)`.
Examples
========
>>> from sympy import poly
>>> from sympy.polys.dispersion import dispersion, dispersionset
>>> from sympy.abc import x
Dispersion set and dispersion of a simple polynomial:
>>> fp = poly((x - 3)*(x + 3), x)
>>> sorted(dispersionset(fp))
[0, 6]
>>> dispersion(fp)
6
Note that the definition of the dispersion is not symmetric:
>>> fp = poly(x**4 - 3*x**2 + 1, x)
>>> gp = fp.shift(-3)
>>> sorted(dispersionset(fp, gp))
[2, 3, 4]
>>> dispersion(fp, gp)
4
>>> sorted(dispersionset(gp, fp))
[]
>>> dispersion(gp, fp)
-oo
Computing the dispersion also works over field extensions:
>>> from sympy import sqrt
>>> fp = poly(x**2 + sqrt(5)*x - 1, x, domain='QQ<sqrt(5)>')
>>> gp = poly(x**2 + (2 + sqrt(5))*x + sqrt(5), x, domain='QQ<sqrt(5)>')
>>> sorted(dispersionset(fp, gp))
[2]
>>> sorted(dispersionset(gp, fp))
[1, 4]
We can even perform the computations for polynomials
having symbolic coefficients:
>>> from sympy.abc import a
>>> fp = poly(4*x**4 + (4*a + 8)*x**3 + (a**2 + 6*a + 4)*x**2 + (a**2 + 2*a)*x, x)
>>> sorted(dispersionset(fp))
[0, 1]
See Also
========
dispersion
References
==========
1. [ManWright94]_
2. [Koepf98]_
3. [Abramov71]_
4. [Man93]_
"""
from sympy.polys.dispersion import dispersionset
return dispersionset(f, g)
def dispersion(f, g=None):
r"""Compute the *dispersion* of polynomials.
For two polynomials `f(x)` and `g(x)` with `\deg f > 0`
and `\deg g > 0` the dispersion `\operatorname{dis}(f, g)` is defined as:
.. math::
\operatorname{dis}(f, g)
& := \max\{ J(f,g) \cup \{0\} \} \\
& = \max\{ \{a \in \mathbb{N} | \gcd(f(x), g(x+a)) \neq 1\} \cup \{0\} \}
and for a single polynomial `\operatorname{dis}(f) := \operatorname{dis}(f, f)`.
Examples
========
>>> from sympy import poly
>>> from sympy.polys.dispersion import dispersion, dispersionset
>>> from sympy.abc import x
Dispersion set and dispersion of a simple polynomial:
>>> fp = poly((x - 3)*(x + 3), x)
>>> sorted(dispersionset(fp))
[0, 6]
>>> dispersion(fp)
6
Note that the definition of the dispersion is not symmetric:
>>> fp = poly(x**4 - 3*x**2 + 1, x)
>>> gp = fp.shift(-3)
>>> sorted(dispersionset(fp, gp))
[2, 3, 4]
>>> dispersion(fp, gp)
4
>>> sorted(dispersionset(gp, fp))
[]
>>> dispersion(gp, fp)
-oo
Computing the dispersion also works over field extensions:
>>> from sympy import sqrt
>>> fp = poly(x**2 + sqrt(5)*x - 1, x, domain='QQ<sqrt(5)>')
>>> gp = poly(x**2 + (2 + sqrt(5))*x + sqrt(5), x, domain='QQ<sqrt(5)>')
>>> sorted(dispersionset(fp, gp))
[2]
>>> sorted(dispersionset(gp, fp))
[1, 4]
We can even perform the computations for polynomials
having symbolic coefficients:
>>> from sympy.abc import a
>>> fp = poly(4*x**4 + (4*a + 8)*x**3 + (a**2 + 6*a + 4)*x**2 + (a**2 + 2*a)*x, x)
>>> sorted(dispersionset(fp))
[0, 1]
See Also
========
dispersionset
References
==========
1. [ManWright94]_
2. [Koepf98]_
3. [Abramov71]_
4. [Man93]_
"""
from sympy.polys.dispersion import dispersion
return dispersion(f, g)
def cofactors(f, g):
"""
Returns the GCD of ``f`` and ``g`` and their cofactors.
Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and
        ``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are the so-called cofactors
of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).cofactors(Poly(x**2 - 3*x + 2, x))
(Poly(x - 1, x, domain='ZZ'),
Poly(x + 1, x, domain='ZZ'),
Poly(x - 2, x, domain='ZZ'))
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'cofactors'):
h, cff, cfg = F.cofactors(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'cofactors')
return per(h), per(cff), per(cfg)
def gcd(f, g):
"""
Returns the polynomial GCD of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).gcd(Poly(x**2 - 3*x + 2, x))
Poly(x - 1, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'gcd'):
result = F.gcd(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'gcd')
return per(result)
def lcm(f, g):
"""
Returns polynomial LCM of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).lcm(Poly(x**2 - 3*x + 2, x))
Poly(x**3 - 2*x**2 - x + 2, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'lcm'):
result = F.lcm(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'lcm')
return per(result)
def trunc(f, p):
"""
Reduce ``f`` modulo a constant ``p``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 + 3*x**2 + 5*x + 7, x).trunc(3)
Poly(-x**3 - x + 1, x, domain='ZZ')
"""
p = f.rep.dom.convert(p)
if hasattr(f.rep, 'trunc'):
result = f.rep.trunc(p)
else: # pragma: no cover
raise OperationNotSupported(f, 'trunc')
return f.per(result)
def monic(self, auto=True):
"""
Divides all coefficients by ``LC(f)``.
Examples
========
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x
>>> Poly(3*x**2 + 6*x + 9, x, domain=ZZ).monic()
Poly(x**2 + 2*x + 3, x, domain='QQ')
>>> Poly(3*x**2 + 4*x + 2, x, domain=ZZ).monic()
Poly(x**2 + 4/3*x + 2/3, x, domain='QQ')
"""
f = self
if auto and f.rep.dom.has_Ring:
f = f.to_field()
if hasattr(f.rep, 'monic'):
result = f.rep.monic()
else: # pragma: no cover
raise OperationNotSupported(f, 'monic')
return f.per(result)
def content(f):
"""
Returns the GCD of polynomial coefficients.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(6*x**2 + 8*x + 12, x).content()
2
"""
if hasattr(f.rep, 'content'):
result = f.rep.content()
else: # pragma: no cover
raise OperationNotSupported(f, 'content')
return f.rep.dom.to_sympy(result)
def primitive(f):
"""
Returns the content and a primitive form of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 + 8*x + 12, x).primitive()
(2, Poly(x**2 + 4*x + 6, x, domain='ZZ'))
"""
if hasattr(f.rep, 'primitive'):
cont, result = f.rep.primitive()
else: # pragma: no cover
raise OperationNotSupported(f, 'primitive')
return f.rep.dom.to_sympy(cont), f.per(result)
def compose(f, g):
"""
Computes the functional composition of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + x, x).compose(Poly(x - 1, x))
Poly(x**2 - x, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'compose'):
result = F.compose(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'compose')
return per(result)
def decompose(f):
"""
Computes a functional decomposition of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**4 + 2*x**3 - x - 1, x, domain='ZZ').decompose()
[Poly(x**2 - x - 1, x, domain='ZZ'), Poly(x**2 + x, x, domain='ZZ')]
"""
if hasattr(f.rep, 'decompose'):
result = f.rep.decompose()
else: # pragma: no cover
raise OperationNotSupported(f, 'decompose')
return list(map(f.per, result))
def shift(f, a):
"""
Efficiently compute Taylor shift ``f(x + a)``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 2*x + 1, x).shift(2)
Poly(x**2 + 2*x + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'shift'):
result = f.rep.shift(a)
else: # pragma: no cover
raise OperationNotSupported(f, 'shift')
return f.per(result)
def sturm(self, auto=True):
"""
Computes the Sturm sequence of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 - 2*x**2 + x - 3, x).sturm()
[Poly(x**3 - 2*x**2 + x - 3, x, domain='QQ'),
Poly(3*x**2 - 4*x + 1, x, domain='QQ'),
Poly(2/9*x + 25/9, x, domain='QQ'),
Poly(-2079/4, x, domain='QQ')]
"""
f = self
if auto and f.rep.dom.has_Ring:
f = f.to_field()
if hasattr(f.rep, 'sturm'):
result = f.rep.sturm()
else: # pragma: no cover
raise OperationNotSupported(f, 'sturm')
return list(map(f.per, result))
def gff_list(f):
"""
Computes greatest factorial factorization of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**5 + 2*x**4 - x**3 - 2*x**2
>>> Poly(f).gff_list()
[(Poly(x, x, domain='ZZ'), 1), (Poly(x + 2, x, domain='ZZ'), 4)]
"""
if hasattr(f.rep, 'gff_list'):
result = f.rep.gff_list()
else: # pragma: no cover
raise OperationNotSupported(f, 'gff_list')
return [(f.per(g), k) for g, k in result]
def sqf_norm(f):
"""
Computes square-free norm of ``f``.
        Returns ``s``, ``f``, ``r``, such that ``g(x) = f(x - s*a)`` and
        ``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``,
        where ``a`` is the generator of the algebraic extension of the ground domain.
Examples
========
>>> from sympy import Poly, sqrt
>>> from sympy.abc import x
>>> s, f, r = Poly(x**2 + 1, x, extension=[sqrt(3)]).sqf_norm()
>>> s
1
>>> f
Poly(x**2 - 2*sqrt(3)*x + 4, x, domain='QQ<sqrt(3)>')
>>> r
Poly(x**4 - 4*x**2 + 16, x, domain='QQ')
"""
if hasattr(f.rep, 'sqf_norm'):
s, g, r = f.rep.sqf_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_norm')
return s, f.per(g), f.per(r)
def sqf_part(f):
"""
Computes square-free part of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 - 3*x - 2, x).sqf_part()
Poly(x**2 - x - 2, x, domain='ZZ')
"""
if hasattr(f.rep, 'sqf_part'):
result = f.rep.sqf_part()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_part')
return f.per(result)
def sqf_list(f, all=False):
"""
Returns a list of square-free factors of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = 2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16
>>> Poly(f).sqf_list()
(2, [(Poly(x + 1, x, domain='ZZ'), 2),
(Poly(x + 2, x, domain='ZZ'), 3)])
>>> Poly(f).sqf_list(all=True)
(2, [(Poly(1, x, domain='ZZ'), 1),
(Poly(x + 1, x, domain='ZZ'), 2),
(Poly(x + 2, x, domain='ZZ'), 3)])
"""
if hasattr(f.rep, 'sqf_list'):
coeff, factors = f.rep.sqf_list(all)
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_list')
return f.rep.dom.to_sympy(coeff), [(f.per(g), k) for g, k in factors]
def sqf_list_include(f, all=False):
"""
Returns a list of square-free factors of ``f``.
Examples
========
>>> from sympy import Poly, expand
>>> from sympy.abc import x
>>> f = expand(2*(x + 1)**3*x**4)
>>> f
2*x**7 + 6*x**6 + 6*x**5 + 2*x**4
>>> Poly(f).sqf_list_include()
[(Poly(2, x, domain='ZZ'), 1),
(Poly(x + 1, x, domain='ZZ'), 3),
(Poly(x, x, domain='ZZ'), 4)]
>>> Poly(f).sqf_list_include(all=True)
[(Poly(2, x, domain='ZZ'), 1),
(Poly(1, x, domain='ZZ'), 2),
(Poly(x + 1, x, domain='ZZ'), 3),
(Poly(x, x, domain='ZZ'), 4)]
"""
if hasattr(f.rep, 'sqf_list_include'):
factors = f.rep.sqf_list_include(all)
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_list_include')
return [(f.per(g), k) for g, k in factors]
def factor_list(f):
"""
Returns a list of irreducible factors of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = 2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y
>>> Poly(f).factor_list()
(2, [(Poly(x + y, x, y, domain='ZZ'), 1),
(Poly(x**2 + 1, x, y, domain='ZZ'), 2)])
"""
if hasattr(f.rep, 'factor_list'):
try:
coeff, factors = f.rep.factor_list()
except DomainError:
return S.One, [(f, 1)]
else: # pragma: no cover
raise OperationNotSupported(f, 'factor_list')
return f.rep.dom.to_sympy(coeff), [(f.per(g), k) for g, k in factors]
def factor_list_include(f):
"""
Returns a list of irreducible factors of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = 2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y
>>> Poly(f).factor_list_include()
[(Poly(2*x + 2*y, x, y, domain='ZZ'), 1),
(Poly(x**2 + 1, x, y, domain='ZZ'), 2)]
"""
if hasattr(f.rep, 'factor_list_include'):
try:
factors = f.rep.factor_list_include()
except DomainError:
return [(f, 1)]
else: # pragma: no cover
raise OperationNotSupported(f, 'factor_list_include')
return [(f.per(g), k) for g, k in factors]
def intervals(f, all=False, eps=None, inf=None, sup=None, fast=False, sqf=False):
"""
Compute isolating intervals for roots of ``f``.
For real roots the Vincent-Akritas-Strzebonski (VAS) continued fractions method is used.
References
==========
1. Alkiviadis G. Akritas and Adam W. Strzebonski: A Comparative Study of Two Real Root
Isolation Methods. Nonlinear Analysis: Modelling and Control, Vol. 10, No. 4, 297-304, 2005.
2. Alkiviadis G. Akritas, Adam W. Strzebonski and Panagiotis S. Vigklas: Improving the
Performance of the Continued Fractions Method Using new Bounds of Positive Roots. Nonlinear
Analysis: Modelling and Control, Vol. 13, No. 3, 265-279, 2008.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3, x).intervals()
[((-2, -1), 1), ((1, 2), 1)]
>>> Poly(x**2 - 3, x).intervals(eps=1e-2)
[((-26/15, -19/11), 1), ((19/11, 26/15), 1)]
"""
if eps is not None:
eps = QQ.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if inf is not None:
inf = QQ.convert(inf)
if sup is not None:
sup = QQ.convert(sup)
if hasattr(f.rep, 'intervals'):
result = f.rep.intervals(
all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
else: # pragma: no cover
raise OperationNotSupported(f, 'intervals')
if sqf:
def _real(interval):
s, t = interval
return (QQ.to_sympy(s), QQ.to_sympy(t))
if not all:
return list(map(_real, result))
def _complex(rectangle):
(u, v), (s, t) = rectangle
return (QQ.to_sympy(u) + I*QQ.to_sympy(v),
QQ.to_sympy(s) + I*QQ.to_sympy(t))
real_part, complex_part = result
return list(map(_real, real_part)), list(map(_complex, complex_part))
else:
def _real(interval):
(s, t), k = interval
return ((QQ.to_sympy(s), QQ.to_sympy(t)), k)
if not all:
return list(map(_real, result))
def _complex(rectangle):
((u, v), (s, t)), k = rectangle
return ((QQ.to_sympy(u) + I*QQ.to_sympy(v),
QQ.to_sympy(s) + I*QQ.to_sympy(t)), k)
real_part, complex_part = result
return list(map(_real, real_part)), list(map(_complex, complex_part))
def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
"""
Refine an isolating interval of a root to the given precision.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3, x).refine_root(1, 2, eps=1e-2)
(19/11, 26/15)
"""
if check_sqf and not f.is_sqf:
raise PolynomialError("only square-free polynomials supported")
s, t = QQ.convert(s), QQ.convert(t)
if eps is not None:
eps = QQ.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if steps is not None:
steps = int(steps)
elif eps is None:
steps = 1
if hasattr(f.rep, 'refine_root'):
S, T = f.rep.refine_root(s, t, eps=eps, steps=steps, fast=fast)
else: # pragma: no cover
raise OperationNotSupported(f, 'refine_root')
return QQ.to_sympy(S), QQ.to_sympy(T)
def count_roots(f, inf=None, sup=None):
"""
Return the number of roots of ``f`` in ``[inf, sup]`` interval.
Examples
========
>>> from sympy import Poly, I
>>> from sympy.abc import x
>>> Poly(x**4 - 4, x).count_roots(-3, 3)
2
>>> Poly(x**4 - 4, x).count_roots(0, 1 + 3*I)
1
"""
inf_real, sup_real = True, True
if inf is not None:
inf = sympify(inf)
if inf is S.NegativeInfinity:
inf = None
else:
re, im = inf.as_real_imag()
if not im:
inf = QQ.convert(inf)
else:
inf, inf_real = list(map(QQ.convert, (re, im))), False
if sup is not None:
sup = sympify(sup)
if sup is S.Infinity:
sup = None
else:
re, im = sup.as_real_imag()
if not im:
sup = QQ.convert(sup)
else:
sup, sup_real = list(map(QQ.convert, (re, im))), False
if inf_real and sup_real:
if hasattr(f.rep, 'count_real_roots'):
count = f.rep.count_real_roots(inf=inf, sup=sup)
else: # pragma: no cover
raise OperationNotSupported(f, 'count_real_roots')
else:
if inf_real and inf is not None:
inf = (inf, QQ.zero)
if sup_real and sup is not None:
sup = (sup, QQ.zero)
if hasattr(f.rep, 'count_complex_roots'):
count = f.rep.count_complex_roots(inf=inf, sup=sup)
else: # pragma: no cover
raise OperationNotSupported(f, 'count_complex_roots')
return Integer(count)
def root(f, index, radicals=True):
"""
Get an indexed root of a polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(2*x**3 - 7*x**2 + 4*x + 4)
>>> f.root(0)
-1/2
>>> f.root(1)
2
>>> f.root(2)
2
>>> f.root(3)
Traceback (most recent call last):
...
IndexError: root index out of [-3, 2] range, got 3
>>> Poly(x**5 + x + 1).root(0)
CRootOf(x**3 - x**2 + 1, 0)
"""
return sympy.polys.rootoftools.rootof(f, index, radicals=radicals)
def real_roots(f, multiple=True, radicals=True):
"""
Return a list of real roots with multiplicities.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 - 7*x**2 + 4*x + 4).real_roots()
[-1/2, 2, 2]
>>> Poly(x**3 + x + 1).real_roots()
[CRootOf(x**3 + x + 1, 0)]
"""
reals = sympy.polys.rootoftools.CRootOf.real_roots(f, radicals=radicals)
if multiple:
return reals
else:
return group(reals, multiple=False)
def all_roots(f, multiple=True, radicals=True):
"""
Return a list of real and complex roots with multiplicities.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 - 7*x**2 + 4*x + 4).all_roots()
[-1/2, 2, 2]
>>> Poly(x**3 + x + 1).all_roots()
[CRootOf(x**3 + x + 1, 0),
CRootOf(x**3 + x + 1, 1),
CRootOf(x**3 + x + 1, 2)]
"""
roots = sympy.polys.rootoftools.CRootOf.all_roots(f, radicals=radicals)
if multiple:
return roots
else:
return group(roots, multiple=False)
def nroots(f, n=15, maxsteps=50, cleanup=True):
"""
Compute numerical approximations of roots of ``f``.
Parameters
==========
n ... the number of digits to calculate
maxsteps ... the maximum number of iterations to do
If the accuracy `n` cannot be reached within `maxsteps` iterations, an
exception is raised; rerun with a higher `maxsteps`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3).nroots(n=15)
[-1.73205080756888, 1.73205080756888]
>>> Poly(x**2 - 3).nroots(n=30)
[-1.73205080756887729352744634151, 1.73205080756887729352744634151]
"""
if f.is_multivariate:
raise MultivariatePolynomialError(
"can't compute numerical roots of %s" % f)
if f.degree() <= 0:
return []
# For integer and rational coefficients, convert them to integers only
# (for accuracy). Otherwise just try to convert the coefficients to
# mpmath.mpc and raise an exception if the conversion fails.
if f.rep.dom is ZZ:
coeffs = [int(coeff) for coeff in f.all_coeffs()]
elif f.rep.dom is QQ:
denoms = [coeff.q for coeff in f.all_coeffs()]
from sympy.core.numbers import ilcm
fac = ilcm(*denoms)
coeffs = [int(coeff*fac) for coeff in f.all_coeffs()]
else:
coeffs = [coeff.evalf(n=n).as_real_imag()
for coeff in f.all_coeffs()]
try:
coeffs = [mpmath.mpc(*coeff) for coeff in coeffs]
except TypeError:
raise DomainError("Numerical domain expected, got %s" % \
f.rep.dom)
dps = mpmath.mp.dps
mpmath.mp.dps = n
try:
# We need to add extra precision to guard against losing accuracy.
# 10 times the degree of the polynomial seems to work well.
roots = mpmath.polyroots(coeffs, maxsteps=maxsteps,
cleanup=cleanup, error=False, extraprec=f.degree()*10)
# Mpmath puts real roots first, then complex ones (as does all_roots)
# so we make sure this convention holds here, too.
roots = list(map(sympify,
sorted(roots, key=lambda r: (1 if r.imag else 0, r.real, r.imag))))
except NoConvergence:
raise NoConvergence(
'convergence to root failed; try n < %s or maxsteps > %s' % (
n, maxsteps))
finally:
mpmath.mp.dps = dps
return roots
def ground_roots(f):
"""
Compute roots of ``f`` by factorization in the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**6 - 4*x**4 + 4*x**3 - x**2).ground_roots()
{0: 2, 1: 2}
"""
if f.is_multivariate:
raise MultivariatePolynomialError(
"can't compute ground roots of %s" % f)
roots = {}
for factor, k in f.factor_list()[1]:
if factor.is_linear:
a, b = factor.all_coeffs()
roots[-b/a] = k
return roots
def nth_power_roots_poly(f, n):
"""
Construct a polynomial with n-th powers of roots of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**4 - x**2 + 1)
>>> f.nth_power_roots_poly(2)
Poly(x**4 - 2*x**3 + 3*x**2 - 2*x + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(3)
Poly(x**4 + 2*x**2 + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(4)
Poly(x**4 + 2*x**3 + 3*x**2 + 2*x + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(12)
Poly(x**4 - 4*x**3 + 6*x**2 - 4*x + 1, x, domain='ZZ')
"""
if f.is_multivariate:
raise MultivariatePolynomialError(
"must be a univariate polynomial")
N = sympify(n)
if N.is_Integer and N >= 1:
n = int(N)
else:
raise ValueError("'n' must an integer and n >= 1, got %s" % n)
x = f.gen
t = Dummy('t')
r = f.resultant(f.__class__.from_expr(x**n - t, x, t))
return r.replace(t, x)
def cancel(f, g, include=False):
"""
Cancel common factors in a rational function ``f/g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x))
(1, Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ'))
>>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x), include=True)
(Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ'))
"""
dom, per, F, G = f._unify(g)
if hasattr(F, 'cancel'):
result = F.cancel(G, include=include)
else: # pragma: no cover
raise OperationNotSupported(f, 'cancel')
if not include:
if dom.has_assoc_Ring:
dom = dom.get_ring()
cp, cq, p, q = result
cp = dom.to_sympy(cp)
cq = dom.to_sympy(cq)
return cp/cq, per(p), per(q)
else:
return tuple(map(per, result))
@property
def is_zero(f):
"""
Returns ``True`` if ``f`` is a zero polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(0, x).is_zero
True
>>> Poly(1, x).is_zero
False
"""
return f.rep.is_zero
@property
def is_one(f):
"""
Returns ``True`` if ``f`` is a unit polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(0, x).is_one
False
>>> Poly(1, x).is_one
True
"""
return f.rep.is_one
@property
def is_sqf(f):
"""
Returns ``True`` if ``f`` is a square-free polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 2*x + 1, x).is_sqf
False
>>> Poly(x**2 - 1, x).is_sqf
True
"""
return f.rep.is_sqf
@property
def is_monic(f):
"""
Returns ``True`` if the leading coefficient of ``f`` is one.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 2, x).is_monic
True
>>> Poly(2*x + 2, x).is_monic
False
"""
return f.rep.is_monic
@property
def is_primitive(f):
"""
Returns ``True`` if GCD of the coefficients of ``f`` is one.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 + 6*x + 12, x).is_primitive
False
>>> Poly(x**2 + 3*x + 6, x).is_primitive
True
"""
return f.rep.is_primitive
@property
def is_ground(f):
"""
Returns ``True`` if ``f`` is an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x, x).is_ground
False
>>> Poly(2, x).is_ground
True
>>> Poly(y, x).is_ground
True
"""
return f.rep.is_ground
@property
def is_linear(f):
"""
Returns ``True`` if ``f`` is linear in all its variables.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x + y + 2, x, y).is_linear
True
>>> Poly(x*y + 2, x, y).is_linear
False
"""
return f.rep.is_linear
@property
def is_quadratic(f):
"""
Returns ``True`` if ``f`` is quadratic in all its variables.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x*y + 2, x, y).is_quadratic
True
>>> Poly(x*y**2 + 2, x, y).is_quadratic
False
"""
return f.rep.is_quadratic
@property
def is_monomial(f):
"""
Returns ``True`` if ``f`` is zero or has only one term.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(3*x**2, x).is_monomial
True
>>> Poly(3*x**2 + 1, x).is_monomial
False
"""
return f.rep.is_monomial
@property
def is_homogeneous(f):
"""
Returns ``True`` if ``f`` is a homogeneous polynomial.
A homogeneous polynomial is a polynomial whose monomials with
non-zero coefficients all have the same total degree. If you want not
only to check if a polynomial is homogeneous but also compute its
homogeneous order, then use :func:`Poly.homogeneous_order`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x*y, x, y).is_homogeneous
True
>>> Poly(x**3 + x*y, x, y).is_homogeneous
False
"""
return f.rep.is_homogeneous
@property
def is_irreducible(f):
"""
Returns ``True`` if ``f`` has no factors over its domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + x + 1, x, modulus=2).is_irreducible
True
>>> Poly(x**2 + 1, x, modulus=2).is_irreducible
False
"""
return f.rep.is_irreducible
@property
def is_univariate(f):
"""
Returns ``True`` if ``f`` is a univariate polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x + 1, x).is_univariate
True
>>> Poly(x*y**2 + x*y + 1, x, y).is_univariate
False
>>> Poly(x*y**2 + x*y + 1, x).is_univariate
True
>>> Poly(x**2 + x + 1, x, y).is_univariate
False
"""
return len(f.gens) == 1
@property
def is_multivariate(f):
"""
Returns ``True`` if ``f`` is a multivariate polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x + 1, x).is_multivariate
False
>>> Poly(x*y**2 + x*y + 1, x, y).is_multivariate
True
>>> Poly(x*y**2 + x*y + 1, x).is_multivariate
False
>>> Poly(x**2 + x + 1, x, y).is_multivariate
True
"""
return len(f.gens) != 1
@property
def is_cyclotomic(f):
"""
Returns ``True`` if ``f`` is a cyclotomic polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1
>>> Poly(f).is_cyclotomic
False
>>> g = x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1
>>> Poly(g).is_cyclotomic
True
"""
return f.rep.is_cyclotomic
def __abs__(f):
return f.abs()
def __neg__(f):
return f.neg()
@_sympifyit('g', NotImplemented)
def __add__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return f.as_expr() + g
return f.add(g)
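# Illustrative sketch of the coercion above (assuming the default printers):
# a non-Poly operand is first converted with the same generators; if that
# fails, the result falls back to a plain expression.
#     >>> from sympy import Poly, sin
#     >>> from sympy.abc import x
#     >>> Poly(x, x) + 1
#     Poly(x + 1, x, domain='ZZ')
#     >>> Poly(x, x) + sin(x)   # conversion fails, so an Expr comes back
#     x + sin(x)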
@_sympifyit('g', NotImplemented)
def __radd__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return g + f.as_expr()
return g.add(f)
@_sympifyit('g', NotImplemented)
def __sub__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return f.as_expr() - g
return f.sub(g)
@_sympifyit('g', NotImplemented)
def __rsub__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return g - f.as_expr()
return g.sub(f)
@_sympifyit('g', NotImplemented)
def __mul__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return f.as_expr()*g
return f.mul(g)
@_sympifyit('g', NotImplemented)
def __rmul__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return g*f.as_expr()
return g.mul(f)
@_sympifyit('n', NotImplemented)
def __pow__(f, n):
if n.is_Integer and n >= 0:
return f.pow(n)
else:
return f.as_expr()**n
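# Illustrative sketch of the exponent dispatch above: non-negative integer
# exponents use Poly.pow, anything else falls back to a power expression.
#     >>> from sympy import Poly
#     >>> from sympy.abc import x
#     >>> Poly(x + 1, x)**2
#     Poly(x**2 + 2*x + 1, x, domain='ZZ')
#     >>> Poly(x + 1, x)**-1
#     1/(x + 1)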
@_sympifyit('g', NotImplemented)
def __divmod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return f.div(g)
@_sympifyit('g', NotImplemented)
def __rdivmod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return g.div(f)
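# Illustrative sketch for the division operators defined here (assuming the
# default auto=True behaviour, which retracts back to ZZ when possible):
#     >>> from sympy import Poly
#     >>> from sympy.abc import x
#     >>> divmod(Poly(x**2 + 1, x), Poly(x - 1, x))
#     (Poly(x + 1, x, domain='ZZ'), Poly(2, x, domain='ZZ'))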
@_sympifyit('g', NotImplemented)
def __mod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return f.rem(g)
@_sympifyit('g', NotImplemented)
def __rmod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return g.rem(f)
@_sympifyit('g', NotImplemented)
def __floordiv__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return f.quo(g)
@_sympifyit('g', NotImplemented)
def __rfloordiv__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return g.quo(f)
@_sympifyit('g', NotImplemented)
def __div__(f, g):
return f.as_expr()/g.as_expr()
@_sympifyit('g', NotImplemented)
def __rdiv__(f, g):
return g.as_expr()/f.as_expr()
__truediv__ = __div__
__rtruediv__ = __rdiv__
@_sympifyit('other', NotImplemented)
def __eq__(self, other):
f, g = self, other
if not g.is_Poly:
try:
g = f.__class__(g, f.gens, domain=f.get_domain())
except (PolynomialError, DomainError, CoercionFailed):
return False
if f.gens != g.gens:
return False
if f.rep.dom != g.rep.dom:
try:
dom = f.rep.dom.unify(g.rep.dom, f.gens)
except UnificationFailed:
return False
f = f.set_domain(dom)
g = g.set_domain(dom)
return f.rep == g.rep
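# Illustrative sketch of the structural equality above: the generators must
# match exactly, while differing domains are unified before comparison.
#     >>> from sympy import Poly
#     >>> from sympy.abc import x, y
#     >>> Poly(x, x) == Poly(x, x, y)
#     False
#     >>> Poly(x, x, domain='ZZ') == Poly(x, x, domain='QQ')
#     True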
@_sympifyit('g', NotImplemented)
def __ne__(f, g):
return not f.__eq__(g)
def __nonzero__(f):
return not f.is_zero
__bool__ = __nonzero__
def eq(f, g, strict=False):
if not strict:
return f.__eq__(g)
else:
return f._strict_eq(sympify(g))
def ne(f, g, strict=False):
return not f.eq(g, strict=strict)
def _strict_eq(f, g):
return isinstance(g, f.__class__) and f.gens == g.gens and f.rep.eq(g.rep, strict=True)
@public
class PurePoly(Poly):
"""Class for representing pure polynomials. """
def _hashable_content(self):
"""Allow SymPy to hash Poly instances. """
return (self.rep,)
def __hash__(self):
return super(PurePoly, self).__hash__()
@property
def free_symbols(self):
"""
Free symbols of a polynomial.
Examples
========
>>> from sympy import PurePoly
>>> from sympy.abc import x, y
>>> PurePoly(x**2 + 1).free_symbols
set()
>>> PurePoly(x**2 + y).free_symbols
set()
>>> PurePoly(x**2 + y, x).free_symbols
set([y])
"""
return self.free_symbols_in_domain
@_sympifyit('other', NotImplemented)
def __eq__(self, other):
f, g = self, other
if not g.is_Poly:
try:
g = f.__class__(g, f.gens, domain=f.get_domain())
except (PolynomialError, DomainError, CoercionFailed):
return False
if len(f.gens) != len(g.gens):
return False
if f.rep.dom != g.rep.dom:
try:
dom = f.rep.dom.unify(g.rep.dom, f.gens)
except UnificationFailed:
return False
f = f.set_domain(dom)
g = g.set_domain(dom)
return f.rep == g.rep
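# Illustrative sketch of how PurePoly equality differs from Poly equality:
# only the number of generators matters, not their names.
#     >>> from sympy import PurePoly
#     >>> from sympy.abc import x, y
#     >>> PurePoly(x**2 + 1) == PurePoly(y**2 + 1)
#     True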
def _strict_eq(f, g):
return isinstance(g, f.__class__) and f.rep.eq(g.rep, strict=True)
def _unify(f, g):
g = sympify(g)
if not g.is_Poly:
try:
return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
except CoercionFailed:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if len(f.gens) != len(g.gens):
raise UnificationFailed("can't unify %s with %s" % (f, g))
if not (isinstance(f.rep, DMP) and isinstance(g.rep, DMP)):
raise UnificationFailed("can't unify %s with %s" % (f, g))
cls = f.__class__
gens = f.gens
dom = f.rep.dom.unify(g.rep.dom, gens)
F = f.rep.convert(dom)
G = g.rep.convert(dom)
def per(rep, dom=dom, gens=gens, remove=None):
if remove is not None:
gens = gens[:remove] + gens[remove + 1:]
if not gens:
return dom.to_sympy(rep)
return cls.new(rep, *gens)
return dom, per, F, G
@public
def poly_from_expr(expr, *gens, **args):
"""Construct a polynomial from an expression. """
opt = options.build_options(gens, args)
return _poly_from_expr(expr, opt)
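# Minimal usage sketch: poly_from_expr returns the constructed Poly together
# with the options that were inferred from the expression.
#     >>> from sympy.polys.polytools import poly_from_expr
#     >>> from sympy.abc import x
#     >>> p, opt = poly_from_expr(x**2 + 1)
#     >>> p
#     Poly(x**2 + 1, x, domain='ZZ')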
def _poly_from_expr(expr, opt):
"""Construct a polynomial from an expression. """
orig, expr = expr, sympify(expr)
if not isinstance(expr, Basic):
raise PolificationFailed(opt, orig, expr)
elif expr.is_Poly:
poly = expr.__class__._from_poly(expr, opt)
opt.gens = poly.gens
opt.domain = poly.domain
if opt.polys is None:
opt.polys = True
return poly, opt
elif opt.expand:
expr = expr.expand()
try:
rep, opt = _dict_from_expr(expr, opt)
except GeneratorsNeeded:
raise PolificationFailed(opt, orig, expr)
monoms, coeffs = list(zip(*list(rep.items())))
domain = opt.domain
if domain is None:
opt.domain, coeffs = construct_domain(coeffs, opt=opt)
else:
coeffs = list(map(domain.from_sympy, coeffs))
rep = dict(list(zip(monoms, coeffs)))
poly = Poly._from_dict(rep, opt)
if opt.polys is None:
opt.polys = False
return poly, opt
@public
def parallel_poly_from_expr(exprs, *gens, **args):
"""Construct polynomials from expressions. """
opt = options.build_options(gens, args)
return _parallel_poly_from_expr(exprs, opt)
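# Minimal usage sketch: all expressions are polified over a common set of
# generators and a common domain.
#     >>> from sympy.polys.polytools import parallel_poly_from_expr
#     >>> from sympy.abc import x, y
#     >>> (p, q), opt = parallel_poly_from_expr([x - 1, x + y])
#     >>> p
#     Poly(x - 1, x, y, domain='ZZ')
#     >>> q
#     Poly(x + y, x, y, domain='ZZ')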
def _parallel_poly_from_expr(exprs, opt):
"""Construct polynomials from expressions. """
from sympy.functions.elementary.piecewise import Piecewise
if len(exprs) == 2:
f, g = exprs
if isinstance(f, Poly) and isinstance(g, Poly):
f = f.__class__._from_poly(f, opt)
g = g.__class__._from_poly(g, opt)
f, g = f.unify(g)
opt.gens = f.gens
opt.domain = f.domain
if opt.polys is None:
opt.polys = True
return [f, g], opt
origs, exprs = list(exprs), []
_exprs, _polys = [], []
failed = False
for i, expr in enumerate(origs):
expr = sympify(expr)
if isinstance(expr, Basic):
if expr.is_Poly:
_polys.append(i)
else:
_exprs.append(i)
if opt.expand:
expr = expr.expand()
else:
failed = True
exprs.append(expr)
if failed:
raise PolificationFailed(opt, origs, exprs, True)
if _polys:
# XXX: this is a temporary solution
for i in _polys:
exprs[i] = exprs[i].as_expr()
try:
reps, opt = _parallel_dict_from_expr(exprs, opt)
except GeneratorsNeeded:
raise PolificationFailed(opt, origs, exprs, True)
for k in opt.gens:
if isinstance(k, Piecewise):
raise PolynomialError("Piecewise generators do not make sense")
coeffs_list, lengths = [], []
all_monoms = []
all_coeffs = []
for rep in reps:
monoms, coeffs = list(zip(*list(rep.items())))
coeffs_list.extend(coeffs)
all_monoms.append(monoms)
lengths.append(len(coeffs))
domain = opt.domain
if domain is None:
opt.domain, coeffs_list = construct_domain(coeffs_list, opt=opt)
else:
coeffs_list = list(map(domain.from_sympy, coeffs_list))
for k in lengths:
all_coeffs.append(coeffs_list[:k])
coeffs_list = coeffs_list[k:]
polys = []
for monoms, coeffs in zip(all_monoms, all_coeffs):
rep = dict(list(zip(monoms, coeffs)))
poly = Poly._from_dict(rep, opt)
polys.append(poly)
if opt.polys is None:
opt.polys = bool(_polys)
return polys, opt
def _update_args(args, key, value):
"""Add a new ``(key, value)`` pair to arguments ``dict``. """
args = dict(args)
if key not in args:
args[key] = value
return args
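# Behavioural sketch: the key is only added when absent; an existing value
# is never overwritten.
#     >>> _update_args({'polys': True}, 'polys', False)
#     {'polys': True}
#     >>> _update_args({}, 'polys', False)
#     {'polys': False}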
@public
def degree(f, *gens, **args):
"""
Return the degree of ``f`` in the given variable.
The degree of 0 is negative infinity.
Examples
========
>>> from sympy import degree
>>> from sympy.abc import x, y
>>> degree(x**2 + y*x + 1, gen=x)
2
>>> degree(x**2 + y*x + 1, gen=y)
1
>>> degree(0, x)
-oo
"""
options.allowed_flags(args, ['gen', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('degree', 1, exc)
return sympify(F.degree(opt.gen))
@public
def degree_list(f, *gens, **args):
"""
Return a list of degrees of ``f`` in all variables.
Examples
========
>>> from sympy import degree_list
>>> from sympy.abc import x, y
>>> degree_list(x**2 + y*x + 1)
(2, 1)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('degree_list', 1, exc)
degrees = F.degree_list()
return tuple(map(Integer, degrees))
@public
def LC(f, *gens, **args):
"""
Return the leading coefficient of ``f``.
Examples
========
>>> from sympy import LC
>>> from sympy.abc import x, y
>>> LC(4*x**2 + 2*x*y**2 + x*y + 3*y)
4
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LC', 1, exc)
return F.LC(order=opt.order)
@public
def LM(f, *gens, **args):
"""
Return the leading monomial of ``f``.
Examples
========
>>> from sympy import LM
>>> from sympy.abc import x, y
>>> LM(4*x**2 + 2*x*y**2 + x*y + 3*y)
x**2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LM', 1, exc)
monom = F.LM(order=opt.order)
return monom.as_expr()
@public
def LT(f, *gens, **args):
"""
Return the leading term of ``f``.
Examples
========
>>> from sympy import LT
>>> from sympy.abc import x, y
>>> LT(4*x**2 + 2*x*y**2 + x*y + 3*y)
4*x**2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LT', 1, exc)
monom, coeff = F.LT(order=opt.order)
return coeff*monom.as_expr()
@public
def pdiv(f, g, *gens, **args):
"""
Compute polynomial pseudo-division of ``f`` and ``g``.
Examples
========
>>> from sympy import pdiv
>>> from sympy.abc import x
>>> pdiv(x**2 + 1, 2*x - 4)
(2*x + 4, 20)
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pdiv', 2, exc)
q, r = F.pdiv(G)
if not opt.polys:
return q.as_expr(), r.as_expr()
else:
return q, r
@public
def prem(f, g, *gens, **args):
"""
Compute polynomial pseudo-remainder of ``f`` and ``g``.
Examples
========
>>> from sympy import prem
>>> from sympy.abc import x
>>> prem(x**2 + 1, 2*x - 4)
20
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('prem', 2, exc)
r = F.prem(G)
if not opt.polys:
return r.as_expr()
else:
return r
@public
def pquo(f, g, *gens, **args):
"""
Compute polynomial pseudo-quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import pquo
>>> from sympy.abc import x
>>> pquo(x**2 + 1, 2*x - 4)
2*x + 4
>>> pquo(x**2 - 1, 2*x - 1)
2*x + 1
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pquo', 2, exc)
try:
q = F.pquo(G)
except ExactQuotientFailed:
raise ExactQuotientFailed(f, g)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def pexquo(f, g, *gens, **args):
"""
Compute polynomial exact pseudo-quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import pexquo
>>> from sympy.abc import x
>>> pexquo(x**2 - 1, 2*x - 2)
2*x + 2
>>> pexquo(x**2 + 1, 2*x - 4)
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pexquo', 2, exc)
q = F.pexquo(G)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def div(f, g, *gens, **args):
"""
Compute polynomial division of ``f`` and ``g``.
Examples
========
>>> from sympy import div, ZZ, QQ
>>> from sympy.abc import x
>>> div(x**2 + 1, 2*x - 4, domain=ZZ)
(0, x**2 + 1)
>>> div(x**2 + 1, 2*x - 4, domain=QQ)
(x/2 + 1, 5)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('div', 2, exc)
q, r = F.div(G, auto=opt.auto)
if not opt.polys:
return q.as_expr(), r.as_expr()
else:
return q, r
@public
def rem(f, g, *gens, **args):
"""
Compute polynomial remainder of ``f`` and ``g``.
Examples
========
>>> from sympy import rem, ZZ, QQ
>>> from sympy.abc import x
>>> rem(x**2 + 1, 2*x - 4, domain=ZZ)
x**2 + 1
>>> rem(x**2 + 1, 2*x - 4, domain=QQ)
5
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('rem', 2, exc)
r = F.rem(G, auto=opt.auto)
if not opt.polys:
return r.as_expr()
else:
return r
@public
def quo(f, g, *gens, **args):
"""
Compute polynomial quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import quo
>>> from sympy.abc import x
>>> quo(x**2 + 1, 2*x - 4)
x/2 + 1
>>> quo(x**2 - 1, x - 1)
x + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('quo', 2, exc)
q = F.quo(G, auto=opt.auto)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def exquo(f, g, *gens, **args):
"""
Compute polynomial exact quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import exquo
>>> from sympy.abc import x
>>> exquo(x**2 - 1, x - 1)
x + 1
>>> exquo(x**2 + 1, 2*x - 4)
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('exquo', 2, exc)
q = F.exquo(G, auto=opt.auto)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def half_gcdex(f, g, *gens, **args):
"""
Half extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.
Examples
========
>>> from sympy import half_gcdex
>>> from sympy.abc import x
>>> half_gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
(-x/5 + 3/5, x + 1)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
s, h = domain.half_gcdex(a, b)
except NotImplementedError:
raise ComputationFailed('half_gcdex', 2, exc)
else:
return domain.to_sympy(s), domain.to_sympy(h)
s, h = F.half_gcdex(G, auto=opt.auto)
if not opt.polys:
return s.as_expr(), h.as_expr()
else:
return s, h
@public
def gcdex(f, g, *gens, **args):
"""
Extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
Examples
========
>>> from sympy import gcdex
>>> from sympy.abc import x
>>> gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
(-x/5 + 3/5, x**2/5 - 6*x/5 + 2, x + 1)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
s, t, h = domain.gcdex(a, b)
except NotImplementedError:
raise ComputationFailed('gcdex', 2, exc)
else:
return domain.to_sympy(s), domain.to_sympy(t), domain.to_sympy(h)
s, t, h = F.gcdex(G, auto=opt.auto)
if not opt.polys:
return s.as_expr(), t.as_expr(), h.as_expr()
else:
return s, t, h
@public
def invert(f, g, *gens, **args):
"""
Invert ``f`` modulo ``g`` when possible.
Examples
========
>>> from sympy import invert, S
>>> from sympy.core.numbers import mod_inverse
>>> from sympy.abc import x
>>> invert(x**2 - 1, 2*x - 1)
-4/3
>>> invert(x**2 - 1, x - 1)
Traceback (most recent call last):
...
NotInvertible: zero divisor
For more efficient inversion of Rationals,
use the ``mod_inverse`` function:
>>> mod_inverse(3, 5)
2
>>> (S(2)/5).invert(S(7)/3)
5/2
See Also
========
sympy.core.numbers.mod_inverse
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.invert(a, b))
except NotImplementedError:
raise ComputationFailed('invert', 2, exc)
h = F.invert(G, auto=opt.auto)
if not opt.polys:
return h.as_expr()
else:
return h
@public
def subresultants(f, g, *gens, **args):
"""
Compute subresultant PRS of ``f`` and ``g``.
Examples
========
>>> from sympy import subresultants
>>> from sympy.abc import x
>>> subresultants(x**2 + 1, x**2 - 1)
[x**2 + 1, x**2 - 1, -2]
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('subresultants', 2, exc)
result = F.subresultants(G)
if not opt.polys:
return [r.as_expr() for r in result]
else:
return result
@public
def resultant(f, g, *gens, **args):
"""
Compute resultant of ``f`` and ``g``.
Examples
========
>>> from sympy import resultant
>>> from sympy.abc import x
>>> resultant(x**2 + 1, x**2 - 1)
4
"""
includePRS = args.pop('includePRS', False)
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('resultant', 2, exc)
if includePRS:
result, R = F.resultant(G, includePRS=includePRS)
else:
result = F.resultant(G)
if not opt.polys:
if includePRS:
return result.as_expr(), [r.as_expr() for r in R]
return result.as_expr()
else:
if includePRS:
return result, R
return result
@public
def discriminant(f, *gens, **args):
"""
Compute discriminant of ``f``.
Examples
========
>>> from sympy import discriminant
>>> from sympy.abc import x
>>> discriminant(x**2 + 2*x + 3)
-8
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('discriminant', 1, exc)
result = F.discriminant()
if not opt.polys:
return result.as_expr()
else:
return result
@public
def cofactors(f, g, *gens, **args):
"""
Compute GCD and cofactors of ``f`` and ``g``.
Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and
``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are the so-called cofactors
of ``f`` and ``g``.
Examples
========
>>> from sympy import cofactors
>>> from sympy.abc import x
>>> cofactors(x**2 - 1, x**2 - 3*x + 2)
(x - 1, x + 1, x - 2)
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
h, cff, cfg = domain.cofactors(a, b)
except NotImplementedError:
raise ComputationFailed('cofactors', 2, exc)
else:
return domain.to_sympy(h), domain.to_sympy(cff), domain.to_sympy(cfg)
h, cff, cfg = F.cofactors(G)
if not opt.polys:
return h.as_expr(), cff.as_expr(), cfg.as_expr()
else:
return h, cff, cfg
@public
def gcd_list(seq, *gens, **args):
"""
Compute GCD of a list of polynomials.
Examples
========
>>> from sympy import gcd_list
>>> from sympy.abc import x
>>> gcd_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
x - 1
"""
seq = sympify(seq)
def try_non_polynomial_gcd(seq):
if not gens and not args:
domain, numbers = construct_domain(seq)
if not numbers:
return domain.zero
elif domain.is_Numerical:
result, numbers = numbers[0], numbers[1:]
for number in numbers:
result = domain.gcd(result, number)
if domain.is_one(result):
break
return domain.to_sympy(result)
return None
result = try_non_polynomial_gcd(seq)
if result is not None:
return result
options.allowed_flags(args, ['polys'])
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
except PolificationFailed as exc:
result = try_non_polynomial_gcd(exc.exprs)
if result is not None:
return result
else:
raise ComputationFailed('gcd_list', len(seq), exc)
if not polys:
if not opt.polys:
return S.Zero
else:
return Poly(0, opt=opt)
result, polys = polys[0], polys[1:]
for poly in polys:
result = result.gcd(poly)
if result.is_one:
break
if not opt.polys:
return result.as_expr()
else:
return result
@public
def gcd(f, g=None, *gens, **args):
"""
Compute GCD of ``f`` and ``g``.
Examples
========
>>> from sympy import gcd
>>> from sympy.abc import x
>>> gcd(x**2 - 1, x**2 - 3*x + 2)
x - 1
"""
if hasattr(f, '__iter__'):
if g is not None:
gens = (g,) + gens
return gcd_list(f, *gens, **args)
elif g is None:
raise TypeError("gcd() takes 2 arguments or a sequence of arguments")
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.gcd(a, b))
except NotImplementedError:
raise ComputationFailed('gcd', 2, exc)
result = F.gcd(G)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def lcm_list(seq, *gens, **args):
"""
Compute LCM of a list of polynomials.
Examples
========
>>> from sympy import lcm_list
>>> from sympy.abc import x
>>> lcm_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
x**5 - x**4 - 2*x**3 - x**2 + x + 2
"""
seq = sympify(seq)
def try_non_polynomial_lcm(seq):
if not gens and not args:
domain, numbers = construct_domain(seq)
if not numbers:
return domain.one
elif domain.is_Numerical:
result, numbers = numbers[0], numbers[1:]
for number in numbers:
result = domain.lcm(result, number)
return domain.to_sympy(result)
return None
result = try_non_polynomial_lcm(seq)
if result is not None:
return result
options.allowed_flags(args, ['polys'])
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
except PolificationFailed as exc:
result = try_non_polynomial_lcm(exc.exprs)
if result is not None:
return result
else:
raise ComputationFailed('lcm_list', len(seq), exc)
if not polys:
if not opt.polys:
return S.One
else:
return Poly(1, opt=opt)
result, polys = polys[0], polys[1:]
for poly in polys:
result = result.lcm(poly)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def lcm(f, g=None, *gens, **args):
"""
Compute LCM of ``f`` and ``g``.
Examples
========
>>> from sympy import lcm
>>> from sympy.abc import x
>>> lcm(x**2 - 1, x**2 - 3*x + 2)
x**3 - 2*x**2 - x + 2
"""
if hasattr(f, '__iter__'):
if g is not None:
gens = (g,) + gens
return lcm_list(f, *gens, **args)
elif g is None:
raise TypeError("lcm() takes 2 arguments or a sequence of arguments")
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.lcm(a, b))
except NotImplementedError:
raise ComputationFailed('lcm', 2, exc)
result = F.lcm(G)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def terms_gcd(f, *gens, **args):
"""
Remove GCD of terms from ``f``.
If the ``deep`` flag is True, then the arguments of ``f`` will have
terms_gcd applied to them.
If a fraction is factored out of ``f`` and ``f`` is an Add, then
an unevaluated Mul will be returned so that automatic simplification
does not redistribute it. The hint ``clear``, when set to False, can be
used to prevent such factoring when all coefficients are not fractions.
Examples
========
>>> from sympy import terms_gcd, cos
>>> from sympy.abc import x, y
>>> terms_gcd(x**6*y**2 + x**3*y, x, y)
x**3*y*(x**3*y + 1)
The default action of polys routines is to expand the expression
given to them. terms_gcd follows this behavior:
>>> terms_gcd((3+3*x)*(x+x*y))
3*x*(x*y + x + y + 1)
If this is not desired then the hint ``expand`` can be set to False.
In this case the expression will be treated as though it were comprised
of one or more terms:
>>> terms_gcd((3+3*x)*(x+x*y), expand=False)
(3*x + 3)*(x*y + x)
In order to traverse factors of a Mul or the arguments of other
functions, the ``deep`` hint can be used:
>>> terms_gcd((3 + 3*x)*(x + x*y), expand=False, deep=True)
3*x*(x + 1)*(y + 1)
>>> terms_gcd(cos(x + x*y), deep=True)
cos(x*(y + 1))
Rationals are factored out by default:
>>> terms_gcd(x + y/2)
(2*x + y)/2
Only the y-term had a coefficient that was a fraction; if one
does not want to factor out the 1/2 in cases like this, the
flag ``clear`` can be set to False:
>>> terms_gcd(x + y/2, clear=False)
x + y/2
>>> terms_gcd(x*y/2 + y**2, clear=False)
y*(x/2 + y)
The ``clear`` flag is ignored if all coefficients are fractions:
>>> terms_gcd(x/3 + y/2, clear=False)
(2*x + 3*y)/6
See Also
========
sympy.core.exprtools.gcd_terms, sympy.core.exprtools.factor_terms
"""
from sympy.core.relational import Equality
orig = sympify(f)
if not isinstance(f, Expr) or f.is_Atom:
return orig
if args.get('deep', False):
new = f.func(*[terms_gcd(a, *gens, **args) for a in f.args])
args.pop('deep')
args['expand'] = False
return terms_gcd(new, *gens, **args)
if isinstance(f, Equality):
return f
clear = args.pop('clear', True)
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
return exc.expr
J, f = F.terms_gcd()
if opt.domain.has_Ring:
if opt.domain.has_Field:
denom, f = f.clear_denoms(convert=True)
coeff, f = f.primitive()
if opt.domain.has_Field:
coeff /= denom
else:
coeff = S.One
term = Mul(*[x**j for x, j in zip(f.gens, J)])
if coeff == 1:
coeff = S.One
if term == 1:
return orig
if clear:
return _keep_coeff(coeff, term*f.as_expr())
# base the clearing on the form of the original expression, not
# the (perhaps) Mul that we have now
coeff, f = _keep_coeff(coeff, f.as_expr(), clear=False).as_coeff_Mul()
return _keep_coeff(coeff, term*f, clear=False)
@public
def trunc(f, p, *gens, **args):
"""
Reduce ``f`` modulo a constant ``p``.
Examples
========
>>> from sympy import trunc
>>> from sympy.abc import x
>>> trunc(2*x**3 + 3*x**2 + 5*x + 7, 3)
-x**3 - x + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('trunc', 1, exc)
result = F.trunc(sympify(p))
if not opt.polys:
return result.as_expr()
else:
return result
@public
def monic(f, *gens, **args):
"""
Divide all coefficients of ``f`` by ``LC(f)``.
Examples
========
>>> from sympy import monic
>>> from sympy.abc import x
>>> monic(3*x**2 + 4*x + 2)
x**2 + 4*x/3 + 2/3
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('monic', 1, exc)
result = F.monic(auto=opt.auto)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def content(f, *gens, **args):
"""
Compute GCD of coefficients of ``f``.
Examples
========
>>> from sympy import content
>>> from sympy.abc import x
>>> content(6*x**2 + 8*x + 12)
2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('content', 1, exc)
return F.content()
@public
def primitive(f, *gens, **args):
"""
Compute content and the primitive form of ``f``.
Examples
========
>>> from sympy.polys.polytools import primitive
>>> from sympy.abc import x
>>> primitive(6*x**2 + 8*x + 12)
(2, 3*x**2 + 4*x + 6)
>>> eq = (2 + 2*x)*x + 2
Expansion is performed by default:
>>> primitive(eq)
(2, x**2 + x + 1)
Set ``expand`` to False to shut this off. Note that the
extraction will not be recursive; use the as_content_primitive method
for recursive, non-destructive Rational extraction.
>>> primitive(eq, expand=False)
(1, x*(2*x + 2) + 2)
>>> eq.as_content_primitive()
(2, x*(x + 1) + 1)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('primitive', 1, exc)
cont, result = F.primitive()
if not opt.polys:
return cont, result.as_expr()
else:
return cont, result
@public
def compose(f, g, *gens, **args):
"""
Compute functional composition ``f(g)``.
Examples
========
>>> from sympy import compose
>>> from sympy.abc import x
>>> compose(x**2 + x, x - 1)
x**2 - x
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('compose', 2, exc)
result = F.compose(G)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def decompose(f, *gens, **args):
"""
Compute functional decomposition of ``f``.
Examples
========
>>> from sympy import decompose
>>> from sympy.abc import x
>>> decompose(x**4 + 2*x**3 - x - 1)
[x**2 - x - 1, x**2 + x]
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('decompose', 1, exc)
result = F.decompose()
if not opt.polys:
return [r.as_expr() for r in result]
else:
return result
@public
def sturm(f, *gens, **args):
"""
Compute Sturm sequence of ``f``.
Examples
========
>>> from sympy import sturm
>>> from sympy.abc import x
>>> sturm(x**3 - 2*x**2 + x - 3)
[x**3 - 2*x**2 + x - 3, 3*x**2 - 4*x + 1, 2*x/9 + 25/9, -2079/4]
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('sturm', 1, exc)
result = F.sturm(auto=opt.auto)
if not opt.polys:
return [r.as_expr() for r in result]
else:
return result
@public
def gff_list(f, *gens, **args):
"""
Compute a list of greatest factorial factors of ``f``.
Examples
========
>>> from sympy import gff_list, ff
>>> from sympy.abc import x
>>> f = x**5 + 2*x**4 - x**3 - 2*x**2
>>> gff_list(f)
[(x, 1), (x + 2, 4)]
>>> (ff(x, 1)*ff(x + 2, 4)).expand() == f
True
>>> f = x**12 + 6*x**11 - 11*x**10 - 56*x**9 + 220*x**8 + 208*x**7 - \
1401*x**6 + 1090*x**5 + 2715*x**4 - 6720*x**3 - 1092*x**2 + 5040*x
>>> gff_list(f)
[(x**3 + 7, 2), (x**2 + 5*x, 3)]
>>> ff(x**3 + 7, 2)*ff(x**2 + 5*x, 3) == f
True
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('gff_list', 1, exc)
factors = F.gff_list()
if not opt.polys:
return [(g.as_expr(), k) for g, k in factors]
else:
return factors
@public
def gff(f, *gens, **args):
"""Compute greatest factorial factorization of ``f``. """
raise NotImplementedError('symbolic falling factorial')
@public
def sqf_norm(f, *gens, **args):
"""
Compute square-free norm of ``f``.
Returns ``s``, ``f``, ``r``, such that ``g(x) = f(x - s*a)`` and
``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``,
where ``a`` is the algebraic extension of the ground domain.
Examples
========
>>> from sympy import sqf_norm, sqrt
>>> from sympy.abc import x
>>> sqf_norm(x**2 + 1, extension=[sqrt(3)])
(1, x**2 - 2*sqrt(3)*x + 4, x**4 - 4*x**2 + 16)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('sqf_norm', 1, exc)
s, g, r = F.sqf_norm()
if not opt.polys:
return Integer(s), g.as_expr(), r.as_expr()
else:
return Integer(s), g, r
@public
def sqf_part(f, *gens, **args):
"""
Compute square-free part of ``f``.
Examples
========
>>> from sympy import sqf_part
>>> from sympy.abc import x
>>> sqf_part(x**3 - 3*x - 2)
x**2 - x - 2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('sqf_part', 1, exc)
result = F.sqf_part()
if not opt.polys:
return result.as_expr()
else:
return result
def _sorted_factors(factors, method):
"""Sort a list of ``(expr, exp)`` pairs. """
if method == 'sqf':
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (exp, len(rep), rep)
else:
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (len(rep), exp, rep)
return sorted(factors, key=key)
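# Illustrative sketch of the two orderings: 'sqf' sorts primarily by the
# exponent, any other method primarily by the size of the factor.
#     >>> from sympy import Poly
#     >>> from sympy.abc import x
#     >>> fs = [(Poly(x**2 + 1, x), 1), (Poly(x + 1, x), 2)]
#     >>> [(f.as_expr(), k) for f, k in _sorted_factors(fs, 'sqf')]
#     [(x**2 + 1, 1), (x + 1, 2)]
#     >>> [(f.as_expr(), k) for f, k in _sorted_factors(fs, 'factor')]
#     [(x + 1, 2), (x**2 + 1, 1)]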
def _factors_product(factors):
"""Multiply a list of ``(expr, exp)`` pairs. """
return Mul(*[f.as_expr()**k for f, k in factors])
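# Illustrative sketch of the reconstruction:
#     >>> from sympy import Poly
#     >>> from sympy.abc import x
#     >>> _factors_product([(Poly(x + 1, x), 2), (Poly(x + 2, x), 3)])
#     (x + 1)**2*(x + 2)**3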
def _symbolic_factor_list(expr, opt, method):
"""Helper function for :func:`_symbolic_factor`. """
coeff, factors = S.One, []
args = [i._eval_factor() if hasattr(i, '_eval_factor') else i
for i in Mul.make_args(expr)]
for arg in args:
if arg.is_Number:
coeff *= arg
continue
if arg.is_Mul:
args.extend(arg.args)
continue
if arg.is_Pow:
base, exp = arg.args
if base.is_Number:
factors.append((base, exp))
continue
else:
base, exp = arg, S.One
try:
poly, _ = _poly_from_expr(base, opt)
except PolificationFailed as exc:
factors.append((exc.expr, exp))
else:
func = getattr(poly, method + '_list')
_coeff, _factors = func()
if _coeff is not S.One:
if exp.is_Integer:
coeff *= _coeff**exp
elif _coeff.is_positive:
factors.append((_coeff, exp))
else:
_factors.append((_coeff, S.One))
if exp is S.One:
factors.extend(_factors)
elif exp.is_integer:
factors.extend([(f, k*exp) for f, k in _factors])
else:
other = []
for f, k in _factors:
if f.as_expr().is_positive:
factors.append((f, k*exp))
else:
other.append((f, k))
factors.append((_factors_product(other), exp))
return coeff, factors
def _symbolic_factor(expr, opt, method):
"""Helper function for :func:`_factor`. """
if isinstance(expr, Expr) and not expr.is_Relational:
if hasattr(expr,'_eval_factor'):
return expr._eval_factor()
coeff, factors = _symbolic_factor_list(together(expr), opt, method)
return _keep_coeff(coeff, _factors_product(factors))
elif hasattr(expr, 'args'):
return expr.func(*[_symbolic_factor(arg, opt, method) for arg in expr.args])
elif hasattr(expr, '__iter__'):
return expr.__class__([_symbolic_factor(arg, opt, method) for arg in expr])
else:
return expr
def _generic_factor_list(expr, gens, args, method):
"""Helper function for :func:`sqf_list` and :func:`factor_list`. """
options.allowed_flags(args, ['frac', 'polys'])
opt = options.build_options(gens, args)
expr = sympify(expr)
if isinstance(expr, Expr) and not expr.is_Relational:
numer, denom = together(expr).as_numer_denom()
cp, fp = _symbolic_factor_list(numer, opt, method)
cq, fq = _symbolic_factor_list(denom, opt, method)
if fq and not opt.frac:
raise PolynomialError("a polynomial expected, got %s" % expr)
_opt = opt.clone(dict(expand=True))
for factors in (fp, fq):
for i, (f, k) in enumerate(factors):
if not f.is_Poly:
f, _ = _poly_from_expr(f, _opt)
factors[i] = (f, k)
fp = _sorted_factors(fp, method)
fq = _sorted_factors(fq, method)
if not opt.polys:
fp = [(f.as_expr(), k) for f, k in fp]
fq = [(f.as_expr(), k) for f, k in fq]
coeff = cp/cq
if not opt.frac:
return coeff, fp
else:
return coeff, fp, fq
else:
raise PolynomialError("a polynomial expected, got %s" % expr)
def _generic_factor(expr, gens, args, method):
"""Helper function for :func:`sqf` and :func:`factor`. """
options.allowed_flags(args, [])
opt = options.build_options(gens, args)
return _symbolic_factor(sympify(expr), opt, method)
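# Minimal sketch of this expression-level entry point, which sqf() and
# factor() delegate to:
#     >>> from sympy.abc import x
#     >>> _generic_factor(x**2 - 1, (), {}, method='factor')
#     (x - 1)*(x + 1)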
def to_rational_coeffs(f):
"""
Try to transform a polynomial so that it has rational coefficients.
First a rescaling ``x = alpha*y`` is tried, giving
``f(x) = lc*alpha**n * g(y)`` where ``g`` is a polynomial with
rational coefficients and ``lc`` is the leading coefficient.
If this fails, a translation ``x = y + beta`` with ``f(x) = g(y)``
is tried instead.
Returns ``None`` if no such ``g`` is found;
``(lc, alpha, None, g)`` in case of rescaling, or
``(None, None, beta, g)`` in case of translation.
Notes
=====
Currently the transformation is only attempted when the coefficients
involve square roots but no higher-order radicals.
Examples
========
>>> from sympy import sqrt, Poly, simplify
>>> from sympy.polys.polytools import to_rational_coeffs
>>> from sympy.abc import x
>>> p = Poly(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))}), x, domain='EX')
>>> lc, r, _, g = to_rational_coeffs(p)
>>> lc, r
(7 + 5*sqrt(2), -2*sqrt(2) + 2)
>>> g
Poly(x**3 + x**2 - 1/4*x - 1/4, x, domain='QQ')
>>> r1 = simplify(1/r)
>>> Poly(lc*r**3*(g.as_expr()).subs({x:x*r1}), x, domain='EX') == p
True
"""
from sympy.simplify.simplify import simplify
def _try_rescale(f, f1=None):
"""
Try rescaling ``x -> alpha*x`` to convert ``f`` to a polynomial
with rational coefficients.
If the rescaling is successful, returns ``lc, alpha, f`` where ``lc``
is the leading coefficient, ``alpha`` is the rescaling factor and
``f`` is the rescaled polynomial; returns ``None`` if no suitable
rescaling is found.
"""
from sympy.core.add import Add
if not len(f.gens) == 1 or not (f.gens[0]).is_Atom:
return None, f
n = f.degree()
lc = f.LC()
f1 = f1 or f.monic()
coeffs = f1.all_coeffs()[1:]
coeffs = [simplify(coeffx) for coeffx in coeffs]
if coeffs[-2]:
rescale1_x = simplify(coeffs[-2]/coeffs[-1])
coeffs1 = []
for i in range(len(coeffs)):
coeffx = simplify(coeffs[i]*rescale1_x**(i + 1))
if not coeffx.is_rational:
break
coeffs1.append(coeffx)
else:
rescale_x = simplify(1/rescale1_x)
x = f.gens[0]
v = [x**n]
for i in range(1, n + 1):
v.append(coeffs1[i - 1]*x**(n - i))
f = Add(*v)
f = Poly(f)
return lc, rescale_x, f
return None
def _try_translate(f, f1=None):
"""
Try translating ``x -> x + alpha`` to convert ``f`` to a polynomial
with rational coefficients.
If the translation is successful, returns ``alpha, f`` where ``alpha``
is the translation amount and ``f`` is the shifted polynomial;
returns ``None`` if no suitable translation is found.
"""
from sympy.core.add import Add
if not len(f.gens) == 1 or not (f.gens[0]).is_Atom:
return None, f
n = f.degree()
f1 = f1 or f.monic()
coeffs = f1.all_coeffs()[1:]
c = simplify(coeffs[0])
if c and not c.is_rational:
func = Add
if c.is_Add:
args = c.args
func = c.func
else:
args = [c]
sifted = sift(args, lambda z: z.is_rational)
c1, c2 = sifted[True], sifted[False]
alpha = -func(*c2)/n
f2 = f1.shift(alpha)
return alpha, f2
return None
def _has_square_roots(p):
"""
Return True if the coefficients of ``p`` involve square roots but no radicals of higher order.
"""
from sympy.core.exprtools import Factors
coeffs = p.coeffs()
has_sq = False
for y in coeffs:
for x in Add.make_args(y):
f = Factors(x).factors
r = [wx.q for b, wx in f.items() if
b.is_number and wx.is_Rational and wx.q >= 2]
if not r:
continue
if min(r) == 2:
has_sq = True
if max(r) > 2:
return False
return has_sq
if f.get_domain().is_EX and _has_square_roots(f):
f1 = f.monic()
r = _try_rescale(f, f1)
if r:
return r[0], r[1], None, r[2]
else:
r = _try_translate(f, f1)
if r:
return None, None, r[0], r[1]
return None
def _torational_factor_list(p, x):
"""
Helper function to factor a polynomial using ``to_rational_coeffs``.
Examples
========
>>> from sympy.polys.polytools import _torational_factor_list
>>> from sympy.abc import x
>>> from sympy import sqrt, expand, Mul
>>> p = expand(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))}))
>>> factors = _torational_factor_list(p, x); factors
(-2, [(-x*(1 + sqrt(2))/2 + 1, 1), (-x*(1 + sqrt(2)) - 1, 1), (-x*(1 + sqrt(2)) + 1, 1)])
>>> expand(factors[0]*Mul(*[z[0] for z in factors[1]])) == p
True
>>> p = expand(((x**2-1)*(x-2)).subs({x:x + sqrt(2)}))
>>> factors = _torational_factor_list(p, x); factors
(1, [(x - 2 + sqrt(2), 1), (x - 1 + sqrt(2), 1), (x + 1 + sqrt(2), 1)])
>>> expand(factors[0]*Mul(*[z[0] for z in factors[1]])) == p
True
"""
from sympy.simplify.simplify import simplify
p1 = Poly(p, x, domain='EX')
n = p1.degree()
res = to_rational_coeffs(p1)
if not res:
return None
lc, r, t, g = res
factors = factor_list(g.as_expr())
if lc:
c = simplify(factors[0]*lc*r**n)
r1 = simplify(1/r)
a = []
for z in factors[1:][0]:
a.append((simplify(z[0].subs({x: x*r1})), z[1]))
else:
c = factors[0]
a = []
for z in factors[1:][0]:
a.append((z[0].subs({x: x - t}), z[1]))
return (c, a)
@public
def sqf_list(f, *gens, **args):
"""
Compute a list of square-free factors of ``f``.
Examples
========
>>> from sympy import sqf_list
>>> from sympy.abc import x
>>> sqf_list(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
(2, [(x + 1, 2), (x + 2, 3)])
"""
return _generic_factor_list(f, gens, args, method='sqf')
@public
def sqf(f, *gens, **args):
"""
Compute square-free factorization of ``f``.
Examples
========
>>> from sympy import sqf
>>> from sympy.abc import x
>>> sqf(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
2*(x + 1)**2*(x + 2)**3
"""
return _generic_factor(f, gens, args, method='sqf')
@public
def factor_list(f, *gens, **args):
"""
Compute a list of irreducible factors of ``f``.
Examples
========
>>> from sympy import factor_list
>>> from sympy.abc import x, y
>>> factor_list(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
(2, [(x + y, 1), (x**2 + 1, 2)])
"""
return _generic_factor_list(f, gens, args, method='factor')
@public
def factor(f, *gens, **args):
"""
Compute the factorization of expression, ``f``, into irreducibles. (To
factor an integer into primes, use ``factorint``.)
    There are two modes implemented: symbolic and formal. If ``f`` is not an
instance of :class:`Poly` and generators are not specified, then the
former mode is used. Otherwise, the formal mode is used.
In symbolic mode, :func:`factor` will traverse the expression tree and
factor its components without any prior expansion, unless an instance
of :class:`Add` is encountered (in this case formal factorization is
used). This way :func:`factor` can handle large or symbolic exponents.
By default, the factorization is computed over the rationals. To factor
    over other domains, e.g. an algebraic or finite field, use appropriate
options: ``extension``, ``modulus`` or ``domain``.
Examples
========
>>> from sympy import factor, sqrt
>>> from sympy.abc import x, y
>>> factor(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
2*(x + y)*(x**2 + 1)**2
>>> factor(x**2 + 1)
x**2 + 1
>>> factor(x**2 + 1, modulus=2)
(x + 1)**2
>>> factor(x**2 + 1, gaussian=True)
(x - I)*(x + I)
>>> factor(x**2 - 2, extension=sqrt(2))
(x - sqrt(2))*(x + sqrt(2))
>>> factor((x**2 - 1)/(x**2 + 4*x + 4))
(x - 1)*(x + 1)/(x + 2)**2
>>> factor((x**2 + 4*x + 4)**10000000*(x**2 + 1))
(x + 2)**20000000*(x**2 + 1)
By default, factor deals with an expression as a whole:
>>> eq = 2**(x**2 + 2*x + 1)
>>> factor(eq)
2**(x**2 + 2*x + 1)
If the ``deep`` flag is True then subexpressions will
be factored:
>>> factor(eq, deep=True)
2**((x + 1)**2)
See Also
========
sympy.ntheory.factor_.factorint
"""
f = sympify(f)
if args.pop('deep', False):
partials = {}
muladd = f.atoms(Mul, Add)
for p in muladd:
fac = factor(p, *gens, **args)
if (fac.is_Mul or fac.is_Pow) and fac != p:
partials[p] = fac
return f.xreplace(partials)
try:
return _generic_factor(f, gens, args, method='factor')
except PolynomialError as msg:
if not f.is_commutative:
from sympy.core.exprtools import factor_nc
return factor_nc(f)
else:
raise PolynomialError(msg)
@public
def intervals(F, all=False, eps=None, inf=None, sup=None, strict=False, fast=False, sqf=False):
"""
Compute isolating intervals for roots of ``f``.
Examples
========
>>> from sympy import intervals
>>> from sympy.abc import x
>>> intervals(x**2 - 3)
[((-2, -1), 1), ((1, 2), 1)]
>>> intervals(x**2 - 3, eps=1e-2)
[((-26/15, -19/11), 1), ((19/11, 26/15), 1)]
"""
if not hasattr(F, '__iter__'):
try:
F = Poly(F)
except GeneratorsNeeded:
return []
return F.intervals(all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
else:
polys, opt = parallel_poly_from_expr(F, domain='QQ')
if len(opt.gens) > 1:
raise MultivariatePolynomialError
for i, poly in enumerate(polys):
polys[i] = poly.rep.rep
if eps is not None:
eps = opt.domain.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if inf is not None:
inf = opt.domain.convert(inf)
if sup is not None:
sup = opt.domain.convert(sup)
intervals = dup_isolate_real_roots_list(polys, opt.domain,
eps=eps, inf=inf, sup=sup, strict=strict, fast=fast)
result = []
for (s, t), indices in intervals:
s, t = opt.domain.to_sympy(s), opt.domain.to_sympy(t)
result.append(((s, t), indices))
return result
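# Editor's illustration (hypothetical helper, not in the upstream module): ``intervals``
# also accepts a list of polynomials, in which case each isolating interval is returned
# together with the ``indices`` bookkeeping built above, telling which input polynomials
# have a root there.  The sketch only prints the result rather than asserting its layout.
def _intervals_list_example():
    from sympy.abc import x
    print(intervals([x**2 - 2, x**2 - 3]))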
@public
def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
"""
Refine an isolating interval of a root to the given precision.
Examples
========
>>> from sympy import refine_root
>>> from sympy.abc import x
>>> refine_root(x**2 - 3, 1, 2, eps=1e-2)
(19/11, 26/15)
"""
try:
F = Poly(f)
except GeneratorsNeeded:
raise PolynomialError(
"can't refine a root of %s, not a polynomial" % f)
return F.refine_root(s, t, eps=eps, steps=steps, fast=fast, check_sqf=check_sqf)
@public
def count_roots(f, inf=None, sup=None):
"""
Return the number of roots of ``f`` in ``[inf, sup]`` interval.
If one of ``inf`` or ``sup`` is complex, it will return the number of roots
in the complex rectangle with corners at ``inf`` and ``sup``.
Examples
========
>>> from sympy import count_roots, I
>>> from sympy.abc import x
>>> count_roots(x**4 - 4, -3, 3)
2
>>> count_roots(x**4 - 4, 0, 1 + 3*I)
1
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError("can't count roots of %s, not a polynomial" % f)
return F.count_roots(inf=inf, sup=sup)
@public
def real_roots(f, multiple=True):
"""
Return a list of real roots with multiplicities of ``f``.
Examples
========
>>> from sympy import real_roots
>>> from sympy.abc import x
>>> real_roots(2*x**3 - 7*x**2 + 4*x + 4)
[-1/2, 2, 2]
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError(
"can't compute real roots of %s, not a polynomial" % f)
return F.real_roots(multiple=multiple)
@public
def nroots(f, n=15, maxsteps=50, cleanup=True):
"""
Compute numerical approximations of roots of ``f``.
Examples
========
>>> from sympy import nroots
>>> from sympy.abc import x
>>> nroots(x**2 - 3, n=15)
[-1.73205080756888, 1.73205080756888]
>>> nroots(x**2 - 3, n=30)
[-1.73205080756887729352744634151, 1.73205080756887729352744634151]
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError(
"can't compute numerical roots of %s, not a polynomial" % f)
return F.nroots(n=n, maxsteps=maxsteps, cleanup=cleanup)
@public
def ground_roots(f, *gens, **args):
"""
Compute roots of ``f`` by factorization in the ground domain.
Examples
========
>>> from sympy import ground_roots
>>> from sympy.abc import x
>>> ground_roots(x**6 - 4*x**4 + 4*x**3 - x**2)
{0: 2, 1: 2}
"""
options.allowed_flags(args, [])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('ground_roots', 1, exc)
return F.ground_roots()
@public
def nth_power_roots_poly(f, n, *gens, **args):
"""
Construct a polynomial with n-th powers of roots of ``f``.
Examples
========
>>> from sympy import nth_power_roots_poly, factor, roots
>>> from sympy.abc import x
>>> f = x**4 - x**2 + 1
>>> g = factor(nth_power_roots_poly(f, 2))
>>> g
(x**2 - x + 1)**2
>>> R_f = [ (r**2).expand() for r in roots(f) ]
>>> R_g = roots(g).keys()
>>> set(R_f) == set(R_g)
True
"""
options.allowed_flags(args, [])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('nth_power_roots_poly', 1, exc)
result = F.nth_power_roots_poly(n)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def cancel(f, *gens, **args):
"""
Cancel common factors in a rational function ``f``.
Examples
========
>>> from sympy import cancel, sqrt, Symbol
>>> from sympy.abc import x
>>> A = Symbol('A', commutative=False)
>>> cancel((2*x**2 - 2)/(x**2 - 2*x + 1))
(2*x + 2)/(x - 1)
>>> cancel((sqrt(3) + sqrt(15)*A)/(sqrt(2) + sqrt(10)*A))
sqrt(6)/2
"""
from sympy.core.exprtools import factor_terms
from sympy.functions.elementary.piecewise import Piecewise
options.allowed_flags(args, ['polys'])
f = sympify(f)
if not isinstance(f, (tuple, Tuple)):
if f.is_Number or isinstance(f, Relational) or not isinstance(f, Expr):
return f
f = factor_terms(f, radical=True)
p, q = f.as_numer_denom()
elif len(f) == 2:
p, q = f
elif isinstance(f, Tuple):
return factor_terms(f)
else:
raise ValueError('unexpected argument: %s' % f)
try:
(F, G), opt = parallel_poly_from_expr((p, q), *gens, **args)
except PolificationFailed:
if not isinstance(f, (tuple, Tuple)):
return f
else:
return S.One, p, q
except PolynomialError as msg:
if f.is_commutative and not f.has(Piecewise):
raise PolynomialError(msg)
# Handling of noncommutative and/or piecewise expressions
if f.is_Add or f.is_Mul:
sifted = sift(f.args, lambda x: x.is_commutative is True and not x.has(Piecewise))
c, nc = sifted[True], sifted[False]
nc = [cancel(i) for i in nc]
return f.func(cancel(f.func._from_args(c)), *nc)
else:
reps = []
pot = preorder_traversal(f)
next(pot)
for e in pot:
# XXX: This should really skip anything that's not Expr.
if isinstance(e, (tuple, Tuple, BooleanAtom)):
continue
try:
reps.append((e, cancel(e)))
pot.skip() # this was handled successfully
except NotImplementedError:
pass
return f.xreplace(dict(reps))
c, P, Q = F.cancel(G)
if not isinstance(f, (tuple, Tuple)):
return c*(P.as_expr()/Q.as_expr())
else:
if not opt.polys:
return c, P.as_expr(), Q.as_expr()
else:
return c, P, Q
@public
def reduced(f, G, *gens, **args):
"""
Reduces a polynomial ``f`` modulo a set of polynomials ``G``.
Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,
computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``
such that ``f = q_1*g_1 + ... + q_n*g_n + r``, where ``r`` vanishes or ``r``
is a completely reduced polynomial with respect to ``G``.
Examples
========
>>> from sympy import reduced
>>> from sympy.abc import x, y
>>> reduced(2*x**4 + y**2 - x**2 + y**3, [x**3 - x, y**3 - y])
([2*x, 1], x**2 + y**2 + y)
"""
options.allowed_flags(args, ['polys', 'auto'])
try:
polys, opt = parallel_poly_from_expr([f] + list(G), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('reduced', 0, exc)
domain = opt.domain
retract = False
if opt.auto and domain.has_Ring and not domain.has_Field:
opt = opt.clone(dict(domain=domain.get_field()))
retract = True
from sympy.polys.rings import xring
_ring, _ = xring(opt.gens, opt.domain, opt.order)
for i, poly in enumerate(polys):
poly = poly.set_domain(opt.domain).rep.to_dict()
polys[i] = _ring.from_dict(poly)
Q, r = polys[0].div(polys[1:])
Q = [Poly._from_dict(dict(q), opt) for q in Q]
r = Poly._from_dict(dict(r), opt)
if retract:
try:
_Q, _r = [q.to_ring() for q in Q], r.to_ring()
except CoercionFailed:
pass
else:
Q, r = _Q, _r
if not opt.polys:
return [q.as_expr() for q in Q], r.as_expr()
else:
return Q, r
@public
def groebner(F, *gens, **args):
"""
Computes the reduced Groebner basis for a set of polynomials.
Use the ``order`` argument to set the monomial ordering that will be
used to compute the basis. Allowed orders are ``lex``, ``grlex`` and
``grevlex``. If no order is specified, it defaults to ``lex``.
For more information on Groebner bases, see the references and the docstring
of `solve_poly_system()`.
Examples
========
Example taken from [1].
>>> from sympy import groebner
>>> from sympy.abc import x, y
>>> F = [x*y - 2*y, 2*y**2 - x**2]
>>> groebner(F, x, y, order='lex')
GroebnerBasis([x**2 - 2*y**2, x*y - 2*y, y**3 - 2*y], x, y,
domain='ZZ', order='lex')
>>> groebner(F, x, y, order='grlex')
GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y,
domain='ZZ', order='grlex')
>>> groebner(F, x, y, order='grevlex')
GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y,
domain='ZZ', order='grevlex')
By default, an improved implementation of the Buchberger algorithm is
used. Optionally, an implementation of the F5B algorithm can be used.
    The algorithm can be set using the ``method`` flag or with the :func:`setup`
function from :mod:`sympy.polys.polyconfig`:
>>> F = [x**2 - x - 1, (2*x - 1) * y - (x**10 - (1 - x)**10)]
>>> groebner(F, x, y, method='buchberger')
GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex')
>>> groebner(F, x, y, method='f5b')
GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex')
References
==========
1. [Buchberger01]_
2. [Cox97]_
"""
return GroebnerBasis(F, *gens, **args)
@public
def is_zero_dimensional(F, *gens, **args):
"""
Checks if the ideal generated by a Groebner basis is zero-dimensional.
The algorithm checks if the set of monomials not divisible by the
leading monomial of any element of ``F`` is bounded.
References
==========
David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and
Algorithms, 3rd edition, p. 230
"""
return GroebnerBasis(F, *gens, **args).is_zero_dimensional
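# Editor's illustration (hypothetical helper): for the ideal generated by x**2 + y and
# y**2 + x, the reduced lex basis contains elements whose leading monomials are pure
# powers of each variable, so the check above is expected to return True.
def _is_zero_dimensional_example():
    from sympy.abc import x, y
    return is_zero_dimensional([x**2 + y, y**2 + x], x, y)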
@public
class GroebnerBasis(Basic):
"""Represents a reduced Groebner basis. """
def __new__(cls, F, *gens, **args):
"""Compute a reduced Groebner basis for a system of polynomials. """
options.allowed_flags(args, ['polys', 'method'])
try:
polys, opt = parallel_poly_from_expr(F, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('groebner', len(F), exc)
from sympy.polys.rings import PolyRing
ring = PolyRing(opt.gens, opt.domain, opt.order)
for i, poly in enumerate(polys):
polys[i] = ring.from_dict(poly.rep.to_dict())
G = _groebner(polys, ring, method=opt.method)
G = [Poly._from_dict(g, opt) for g in G]
return cls._new(G, opt)
@classmethod
def _new(cls, basis, options):
obj = Basic.__new__(cls)
obj._basis = tuple(basis)
obj._options = options
return obj
@property
def args(self):
return (Tuple(*self._basis), Tuple(*self._options.gens))
@property
def exprs(self):
return [poly.as_expr() for poly in self._basis]
@property
def polys(self):
return list(self._basis)
@property
def gens(self):
return self._options.gens
@property
def domain(self):
return self._options.domain
@property
def order(self):
return self._options.order
def __len__(self):
return len(self._basis)
def __iter__(self):
if self._options.polys:
return iter(self.polys)
else:
return iter(self.exprs)
def __getitem__(self, item):
if self._options.polys:
basis = self.polys
else:
basis = self.exprs
return basis[item]
def __hash__(self):
return hash((self._basis, tuple(self._options.items())))
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._basis == other._basis and self._options == other._options
elif iterable(other):
return self.polys == list(other) or self.exprs == list(other)
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
@property
def is_zero_dimensional(self):
"""
Checks if the ideal generated by a Groebner basis is zero-dimensional.
The algorithm checks if the set of monomials not divisible by the
leading monomial of any element of ``F`` is bounded.
References
==========
David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and
Algorithms, 3rd edition, p. 230
"""
def single_var(monomial):
return sum(map(bool, monomial)) == 1
exponents = Monomial([0]*len(self.gens))
order = self._options.order
for poly in self.polys:
monomial = poly.LM(order=order)
if single_var(monomial):
exponents *= monomial
# If any element of the exponents vector is zero, then there's
# a variable for which there's no degree bound and the ideal
# generated by this Groebner basis isn't zero-dimensional.
return all(exponents)
def fglm(self, order):
"""
Convert a Groebner basis from one ordering to another.
The FGLM algorithm converts reduced Groebner bases of zero-dimensional
ideals from one ordering to another. This method is often used when it
is infeasible to compute a Groebner basis with respect to a particular
ordering directly.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import groebner
>>> F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1]
>>> G = groebner(F, x, y, order='grlex')
>>> list(G.fglm('lex'))
[2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]
>>> list(groebner(F, x, y, order='lex'))
[2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]
References
==========
J.C. Faugere, P. Gianni, D. Lazard, T. Mora (1994). Efficient
Computation of Zero-dimensional Groebner Bases by Change of
Ordering
"""
opt = self._options
src_order = opt.order
dst_order = monomial_key(order)
if src_order == dst_order:
return self
if not self.is_zero_dimensional:
raise NotImplementedError("can't convert Groebner bases of ideals with positive dimension")
polys = list(self._basis)
domain = opt.domain
opt = opt.clone(dict(
domain=domain.get_field(),
order=dst_order,
))
from sympy.polys.rings import xring
_ring, _ = xring(opt.gens, opt.domain, src_order)
for i, poly in enumerate(polys):
poly = poly.set_domain(opt.domain).rep.to_dict()
polys[i] = _ring.from_dict(poly)
G = matrix_fglm(polys, _ring, dst_order)
G = [Poly._from_dict(dict(g), opt) for g in G]
if not domain.has_Field:
G = [g.clear_denoms(convert=True)[1] for g in G]
opt.domain = domain
return self._new(G, opt)
def reduce(self, expr, auto=True):
"""
Reduces a polynomial modulo a Groebner basis.
Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,
computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``
        such that ``f = q_1*g_1 + ... + q_n*g_n + r``, where ``r`` vanishes or ``r``
is a completely reduced polynomial with respect to ``G``.
Examples
========
>>> from sympy import groebner, expand
>>> from sympy.abc import x, y
>>> f = 2*x**4 - x**2 + y**3 + y**2
>>> G = groebner([x**3 - x, y**3 - y])
>>> G.reduce(f)
([2*x, 1], x**2 + y**2 + y)
>>> Q, r = _
>>> expand(sum(q*g for q, g in zip(Q, G)) + r)
2*x**4 - x**2 + y**3 + y**2
>>> _ == f
True
"""
poly = Poly._from_expr(expr, self._options)
polys = [poly] + list(self._basis)
opt = self._options
domain = opt.domain
retract = False
if auto and domain.has_Ring and not domain.has_Field:
opt = opt.clone(dict(domain=domain.get_field()))
retract = True
from sympy.polys.rings import xring
_ring, _ = xring(opt.gens, opt.domain, opt.order)
for i, poly in enumerate(polys):
poly = poly.set_domain(opt.domain).rep.to_dict()
polys[i] = _ring.from_dict(poly)
Q, r = polys[0].div(polys[1:])
Q = [Poly._from_dict(dict(q), opt) for q in Q]
r = Poly._from_dict(dict(r), opt)
if retract:
try:
_Q, _r = [q.to_ring() for q in Q], r.to_ring()
except CoercionFailed:
pass
else:
Q, r = _Q, _r
if not opt.polys:
return [q.as_expr() for q in Q], r.as_expr()
else:
return Q, r
def contains(self, poly):
"""
        Check if ``poly`` belongs to the ideal generated by ``self``.
Examples
========
>>> from sympy import groebner
>>> from sympy.abc import x, y
>>> f = 2*x**3 + y**3 + 3*y
>>> G = groebner([x**2 + y**2 - 1, x*y - 2])
>>> G.contains(f)
True
>>> G.contains(f + 1)
False
"""
return self.reduce(poly)[1] == 0
@public
def poly(expr, *gens, **args):
"""
Efficiently transform an expression into a polynomial.
Examples
========
>>> from sympy import poly
>>> from sympy.abc import x
>>> poly(x*(x**2 + x - 1)**2)
Poly(x**5 + 2*x**4 - x**3 - 2*x**2 + x, x, domain='ZZ')
"""
options.allowed_flags(args, [])
def _poly(expr, opt):
terms, poly_terms = [], []
for term in Add.make_args(expr):
factors, poly_factors = [], []
for factor in Mul.make_args(term):
if factor.is_Add:
poly_factors.append(_poly(factor, opt))
elif factor.is_Pow and factor.base.is_Add and factor.exp.is_Integer:
poly_factors.append(
_poly(factor.base, opt).pow(factor.exp))
else:
factors.append(factor)
if not poly_factors:
terms.append(term)
else:
product = poly_factors[0]
for factor in poly_factors[1:]:
product = product.mul(factor)
if factors:
factor = Mul(*factors)
if factor.is_Number:
product = product.mul(factor)
else:
product = product.mul(Poly._from_expr(factor, opt))
poly_terms.append(product)
if not poly_terms:
result = Poly._from_expr(expr, opt)
else:
result = poly_terms[0]
for term in poly_terms[1:]:
result = result.add(term)
if terms:
term = Add(*terms)
if term.is_Number:
result = result.add(term)
else:
result = result.add(Poly._from_expr(term, opt))
return result.reorder(*opt.get('gens', ()), **args)
expr = sympify(expr)
if expr.is_Poly:
return Poly(expr, *gens, **args)
if 'expand' not in args:
args['expand'] = False
opt = options.build_options(gens, args)
return _poly(expr, opt)
|
kaichogami/sympy
|
sympy/polys/polytools.py
|
Python
|
bsd-3-clause
| 174,060
|
[
"Gaussian"
] |
d2b3b1b2c25dc5809c1a85c867a1c61df11cf94612225e1916ed30d4083d2cb6
|
from math import sqrt, pi
import numpy as np
from ase.units import Bohr
from gpaw.utilities import pack, pack2
from gpaw.analyse.hirshfeld import HirshfeldDensity
from gpaw.utilities.tools import coordinates
from gpaw.mpi import MASTER
def wignerseitz(gd, atoms, scale=None):
"""Determine which atom is closest to each grid point.
The atom distances might be scaled by the scale list."""
if scale is None:
scale = [1.] * len(atoms)
else:
assert(len(scale) == len(atoms))
r_vG, R2min_G = coordinates(gd, atoms[0].position / Bohr)
R2min_G *= scale[0]**2
index_G = gd.zeros(dtype=int)
for a, atom in enumerate(atoms[1:]):
r_vG, r2_G = coordinates(gd, atom.position / Bohr)
r2_G *= scale[a + 1]**2
index_G = np.where(R2min_G > r2_G, a + 1, index_G)
R2min_G = np.where(R2min_G > r2_G, r2_G, R2min_G)
return index_G
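# Editor's sketch (not part of GPAW): the same nearest-atom partitioning written with
# plain NumPy on a made-up 1D grid, to spell out the np.where bookkeeping used above.
# ``grid`` and ``positions`` are toy data, not GPAW grid descriptors or Atoms objects.
def _wignerseitz_toy_example():
    import numpy as np
    grid = np.linspace(0.0, 10.0, 101)           # toy grid points
    positions = np.array([2.0, 7.5])             # toy atom positions
    index = np.zeros(grid.shape, dtype=int)      # start with everything assigned to atom 0
    r2min = (grid - positions[0])**2
    for a, pos in enumerate(positions[1:], start=1):
        r2 = (grid - pos)**2
        index = np.where(r2min > r2, a, index)   # reassign points closer to atom a
        r2min = np.where(r2min > r2, r2, r2min)
    return index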
class WignerSeitz:
def __init__(self, gd, atoms,
calculator=None, scale=None):
"""Find the grid points nearest to the atoms"""
self.atoms = atoms
self.gd = gd
self.calculator = calculator
self.atom_index = wignerseitz(gd, atoms, scale)
def expand(self, density):
"""Expand a smooth density in Wigner-Seitz cells around the atoms"""
n = len(self.atoms)
weights = np.empty((n,))
for a in range(n):
mask = np.where(self.atom_index == a, density, 0.0)
# XXX Optimize! No need to integrate in zero-region
weights[a] = self.gd.integrate(mask)
return weights
def expand_density(self, nt_G, s, nspins):
"""Get the weights of spin-density in Wigner-Seitz cells
around the atoms. The spin index and number of spins are
needed for the augmentation sphere corrections."""
weights_a = self.expand(nt_G)
for a, nucleus in enumerate(self.atoms):
weights_a[a] += nucleus.get_density_correction(s, nspins)
return weights_a
def expand_wave_function(self, psit_G, u, n):
"""Get the weights of wave function in Wigner-Seitz cells
around the atoms. The spin-k-point index u and band number n
are needed for the augmentation sphere corrections."""
assert psit_G.dtype == float
# smooth part
        weights = self.expand(psit_G**2)
# add augmentation sphere corrections
for a, nucleus in enumerate(self.atoms):
P_i = nucleus.P_uni[u, n]
P_p = pack(np.outer(P_i, P_i))
Delta_p = sqrt(4 * pi) * nucleus.setup.Delta_pL[:, 0]
            weights[a] += np.dot(Delta_p, P_p)
        return weights
def get_effective_volume_ratio(self, atom_index):
"""Effective volume to free volume ratio.
After: Tkatchenko and Scheffler PRL 102 (2009) 073005
"""
atoms = self.atoms
finegd = self.gd
den_g, gd = self.calculator.density.get_all_electron_density(atoms)
assert(gd == finegd)
denfree_g, gd = self.hdensity.get_density([atom_index])
assert(gd == finegd)
        # the atom's r^3 grid
position = self.atoms[atom_index].position / Bohr
r_vg, r2_g = coordinates(finegd, origin=position)
r3_g = r2_g * np.sqrt(r2_g)
weight_g = np.where(self.atom_index == atom_index, 1.0, 0.0)
nom = finegd.integrate(r3_g * den_g[0] * weight_g)
denom = finegd.integrate(r3_g * denfree_g)
return nom / denom
def get_effective_volume_ratios(self):
"""Return the list of effective volume to free volume ratios."""
ratios = []
self.hdensity = HirshfeldDensity(self.calculator)
for a, atom in enumerate(self.atoms):
ratios.append(self.get_effective_volume_ratio(a))
return np.array(ratios)
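# Editor's sketch (not part of GPAW): get_effective_volume_ratio above evaluates the
# Tkatchenko-Scheffler quotient  integral(r^3 n_eff w) / integral(r^3 n_free)  over the
# atom's Wigner-Seitz cell.  Below, the same quotient on made-up NumPy arrays; ``dv``
# stands in for the volume element that finegd.integrate supplies internally.
def _volume_ratio_toy_example():
    import numpy as np
    r = np.linspace(0.0, 8.0, 50)
    r3_g = r**3                       # toy r^3 grid
    den_g = np.exp(-r)                # toy all-electron density
    denfree_g = np.exp(-0.8 * r)      # toy free-atom density
    weight_g = np.ones_like(r)        # toy Wigner-Seitz weight (whole cell belongs to the atom)
    dv = 0.16                         # toy volume element
    nom = np.sum(r3_g * den_g * weight_g) * dv
    denom = np.sum(r3_g * denfree_g) * dv
    return nom / denom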
class LDOSbyBand:
"""Base class for a band by band LDOS"""
def by_element(self):
        # get element indices
elemi = {}
for i, nucleus in enumerate(self.paw.atoms):
symbol = nucleus.setup.symbol
            if symbol in elemi:
elemi[symbol].append(i)
else:
elemi[symbol] = [i]
for key in elemi.keys():
elemi[key] = self.get(elemi[key])
return elemi
class WignerSeitzLDOS(LDOSbyBand):
"""Class to get the unfolded LDOS defined by Wigner-Seitz cells"""
def __init__(self, paw):
self.paw = paw
self.ws = WignerSeitz(paw.gd, paw.atoms)
nu = paw.nkpts * paw.nspins
ldos = np.empty((nu, paw.nbands, len(paw.atoms)))
for u, kpt in enumerate(paw.kpt_u):
for n, psit_G in enumerate(kpt.psit_nG):
                ldos[u, n, :] = self.ws.expand_wave_function(psit_G, u, n)
def write(self, filename, slavewrite=False):
if self.world.rank == MASTER or slavewrite:
paw = self.paw
f = open(filename, 'w')
nn = len(paw.atoms)
for k in range(paw.nkpts):
for s in range(paw.nspins):
u = s*paw.nkpts + k
for n in range(paw.nbands):
# avery: Added dummy loop body to make compiling work.
                        pass
|
qsnake/gpaw
|
gpaw/analyse/wignerseitz.py
|
Python
|
gpl-3.0
| 5,301
|
[
"ASE",
"GPAW"
] |
768c8e3e26a7c92d3fe5110492271559398d9e359beda84ce01163f6ac53dd13
|
#
# Copyright (C) 2013,2014 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import numpy as np
import espressomd
from espressomd import electrostatics
from espressomd import electrostatic_extensions
print("""
=======================================================
= store_properties.py =
=======================================================
Program Information:""")
print(espressomd.features())
espressomd.assert_features(["ELECTROSTATICS"])
dev = "cpu"
# System parameters
#############################################################
# 10 000 Particles
box_l = 10.7437
density = 0.7
# Interaction parameters (repulsive Lennard Jones)
#############################################################
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 1.12246
lj_cap = 20
# Integration parameters
#############################################################
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
system.set_random_state_PRNG()
#system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
np.random.seed(seed=system.seed)
system.time_step = 0.01
system.cell_system.skin = 0.4
system.thermostat.set_langevin(kT=1.0, gamma=1.0)
# warmup integration (with capped LJ potential)
warm_steps = 100
warm_n_times = 30
# do the warmup until the particles have at least the distance min_dist
min_dist = 0.9
# integration
int_steps = 1000
int_n_times = 10
#############################################################
# Setup System #
############################################################
# Interaction setup
#############################################################
system.box_l = [box_l, box_l, box_l]
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig,
cutoff=lj_cut, shift="auto")
system.force_cap = lj_cap
print(system.non_bonded_inter[0, 0].lennard_jones.get_params())
# Particle setup
#############################################################
volume = box_l * box_l * box_l
n_part = int(volume * density)
for i in range(n_part):
system.part.add(id=i, pos=np.random.random(3) * system.box_l)
print("Simulate {} particles in a cubic simulation box {} at density {}."
.format(n_part, box_l, density).strip())
print("Interactions:\n")
act_min_dist = system.analysis.min_dist()
print("Start with minimal distance {}".format(act_min_dist))
system.cell_system.max_num_cells = 2744
# Assign charge to particles
for i in range(n_part // 2 - 1):
system.part[2 * i].q = -1.0
system.part[2 * i + 1].q = 1.0
# P3M setup after charge assigned
#############################################################
p3m = electrostatics.P3M(prefactor=1.0, accuracy=1e-2)
system.actors.add(p3m)
#############################################################
# Warmup Integration #
#############################################################
print("""
Start warmup integration:
At maximum {} times {} steps
Stop if minimal distance is larger than {}
""".strip().format(warm_n_times, warm_steps, min_dist))
# set LJ cap
lj_cap = 20
system.force_cap = lj_cap
print(system.non_bonded_inter[0, 0].lennard_jones)
# Warmup Integration Loop
i = 0
while (i < warm_n_times and act_min_dist < min_dist):
system.integrator.run(warm_steps)
# Warmup criterion
act_min_dist = system.analysis.min_dist()
i += 1
# Increase LJ cap
lj_cap = lj_cap + 10
system.force_cap = lj_cap
# Just to see what else we may get from the C code
import pprint
pprint.pprint(system.cell_system.get_state(), width=1)
pprint.pprint(system.thermostat.get_state(), width=1)
# pprint.pprint(system.part.__getstate__(), width=1)
pprint.pprint(system.__getstate__())
# Pickle data
###########################################################
try:
import cPickle as pickle
except ImportError:
import pickle
with open("particle_save", "wb") as particle_save:
pickle.dump(system.part, particle_save, -1)
with open("p3m_save", "wb") as p3m_save:
pickle.dump(p3m, p3m_save, -1)
with open("system_save", "wb") as system_save:
pickle.dump(system, system_save, -1)
with open("thermostat_save", "wb") as thermostat_save:
pickle.dump(system.thermostat, thermostat_save, -1)
with open("nonBondedInter_save", "wb") as bond_save:
pickle.dump(system.non_bonded_inter, bond_save, -1)
# terminate program
print("\nFinished.")
|
KonradBreitsprecher/espresso
|
samples/store_properties.py
|
Python
|
gpl-3.0
| 5,107
|
[
"ESPResSo"
] |
3502fe52fa2a0bdecf8c1c2fe48e53e9e9312b8e80d324851c5f85357b46d78d
|
########################################################################
# $HeadURL$
# File : JobCleaningAgent.py
# Author : A.T.
########################################################################
""" The Job Cleaning Agent controls removing jobs from the WMS in the end of their life cycle.
This agent will take care of removing user jobs,
while production jobs should be removed by the TransformationCleaningAgent.
"""
__RCSID__ = "$Id$"
from DIRAC import S_OK, gLogger
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
from DIRAC.WorkloadManagementSystem.Client.SandboxStoreClient import SandboxStoreClient
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
import DIRAC.Core.Utilities.Time as Time
import time
import os
class JobCleaningAgent( AgentModule ):
"""
The specific agents must provide the following methods:
- initialize() for initial settings
- beginExecution()
- execute() - the main method called in the agent cycle
- endExecution()
- finalize() - the graceful exit of the method, this one is usually used
for the agent restart
"""
def __init__( self, *args, **kwargs ):
""" c'tor
"""
AgentModule.__init__( self, *args, **kwargs )
#clients
# FIXME: shouldn't we avoid using the DBs directly, and instead go through the service?
self.jobDB = None
self.taskQueueDB = None
self.jobLoggingDB = None
self.maxJobsAtOnce = 100
self.jobByJob = False
self.throttlingPeriod = 0.
self.removeStatusDelay = {'Done':7,
'Killed':1,
'Failed':7 }
#############################################################################
def initialize( self ):
""" Sets defaults
"""
self.am_setOption( "PollingTime", 120 )
self.jobDB = JobDB()
self.taskQueueDB = TaskQueueDB()
self.jobLoggingDB = JobLoggingDB()
# self.sandboxDB = SandboxDB( 'SandboxDB' )
agentTSTypes = self.am_getOption('ProductionTypes', [])
if agentTSTypes:
self.prod_types = agentTSTypes
else:
self.prod_types = Operations().getValue( 'Transformations/DataProcessing', ['MCSimulation', 'Merge'] )
gLogger.info( "Will exclude the following Production types from cleaning %s" % ( ', '.join( self.prod_types ) ) )
self.maxJobsAtOnce = self.am_getOption( 'MaxJobsAtOnce', 500 )
self.jobByJob = self.am_getOption( 'JobByJob', False )
self.throttlingPeriod = self.am_getOption('ThrottlingPeriod', 0.)
self.removeStatusDelay['Done'] = self.am_getOption( 'RemoveStatusDelay/Done', 7 )
self.removeStatusDelay['Killed'] = self.am_getOption( 'RemoveStatusDelay/Killed', 7 )
self.removeStatusDelay['Failed'] = self.am_getOption( 'RemoveStatusDelay/Failed', 7 )
return S_OK()
def __getAllowedJobTypes( self ):
""" Get valid jobTypes
"""
result = self.jobDB.getDistinctJobAttributes( 'JobType' )
if not result[ 'OK' ]:
return result
cleanJobTypes = []
for jobType in result[ 'Value' ]:
if jobType not in self.prod_types:
cleanJobTypes.append( jobType )
self.log.notice( "JobTypes to clean %s" % cleanJobTypes )
return S_OK( cleanJobTypes )
#############################################################################
def execute( self ):
""" Remove jobs in various status
"""
#Delete jobs in "Deleted" state
result = self.removeJobsByStatus( { 'Status' : 'Deleted' } )
if not result[ 'OK' ]:
return result
#Get all the Job types that can be cleaned
result = self.__getAllowedJobTypes()
if not result[ 'OK' ]:
return result
# No jobs in the system subject to removal
if not result['Value']:
return S_OK()
baseCond = { 'JobType' : result[ 'Value' ] }
# Remove jobs with final status
for status in self.removeStatusDelay:
delay = self.removeStatusDelay[ status ]
condDict = dict( baseCond )
condDict[ 'Status' ] = status
delTime = str( Time.dateTime() - delay * Time.day )
result = self.removeJobsByStatus( condDict, delTime )
if not result['OK']:
gLogger.warn( 'Failed to remove jobs in status %s' % status )
return S_OK()
def removeJobsByStatus( self, condDict, delay = False ):
""" Remove deleted jobs
"""
if delay:
gLogger.verbose( "Removing jobs with %s and older than %s" % ( condDict, delay ) )
result = self.jobDB.selectJobs( condDict, older = delay, limit = self.maxJobsAtOnce )
else:
gLogger.verbose( "Removing jobs with %s " % condDict )
result = self.jobDB.selectJobs( condDict, limit = self.maxJobsAtOnce )
if not result['OK']:
return result
jobList = result['Value']
if len(jobList) > self.maxJobsAtOnce:
jobList = jobList[:self.maxJobsAtOnce]
if not jobList:
return S_OK()
self.log.notice( "Deleting %s jobs for %s" % ( len( jobList ), condDict ) )
count = 0
error_count = 0
result = SandboxStoreClient( useCertificates = True ).unassignJobs( jobList )
if not result[ 'OK' ]:
gLogger.warn( "Cannot unassign jobs to sandboxes", result[ 'Message' ] )
result = self.deleteJobOversizedSandbox( jobList )
if not result[ 'OK' ]:
gLogger.warn( "Cannot schedule removal of oversized sandboxes", result[ 'Message' ] )
return result
failedJobs = result['Value']['Failed']
for job in failedJobs:
jobList.pop( jobList.index( job ) )
# TODO: we should not remove a job if it still has requests in the RequestManager.
# But this logic should go in the client or in the service, and right now no service expose jobDB.removeJobFromDB
if self.jobByJob:
for jobID in jobList:
resultJobDB = self.jobDB.removeJobFromDB( jobID )
resultTQ = self.taskQueueDB.deleteJob( jobID )
resultLogDB = self.jobLoggingDB.deleteJob( jobID )
errorFlag = False
if not resultJobDB['OK']:
          gLogger.warn( 'Failed to remove job %d from JobDB' % jobID, resultJobDB['Message'] )
errorFlag = True
if not resultTQ['OK']:
          gLogger.warn( 'Failed to remove job %d from TaskQueueDB' % jobID, resultTQ['Message'] )
errorFlag = True
if not resultLogDB['OK']:
          gLogger.warn( 'Failed to remove job %d from JobLoggingDB' % jobID, resultLogDB['Message'] )
errorFlag = True
if errorFlag:
error_count += 1
else:
count += 1
if self.throttlingPeriod:
time.sleep(self.throttlingPeriod)
else:
result = self.jobDB.removeJobFromDB( jobList )
if not result['OK']:
gLogger.error('Failed to delete %d jobs from JobDB' % len(jobList) )
else:
gLogger.info('Deleted %d jobs from JobDB' % len(jobList) )
for jobID in jobList:
resultTQ = self.taskQueueDB.deleteJob( jobID )
if not resultTQ['OK']:
gLogger.warn( 'Failed to remove job %d from TaskQueueDB' % jobID, resultTQ['Message'] )
error_count += 1
else:
count += 1
result = self.jobLoggingDB.deleteJob( jobList )
if not result['OK']:
gLogger.error('Failed to delete %d jobs from JobLoggingDB' % len(jobList) )
else:
gLogger.info('Deleted %d jobs from JobLoggingDB' % len(jobList) )
if count > 0 or error_count > 0 :
gLogger.info( 'Deleted %d jobs from JobDB, %d errors' % ( count, error_count ) )
return S_OK()
def deleteJobOversizedSandbox( self, jobIDList ):
""" Delete the job oversized sandbox files from storage elements
"""
failed = {}
successful = {}
lfnDict = {}
for jobID in jobIDList:
result = self.jobDB.getJobParameter( jobID, 'OutputSandboxLFN' )
if result['OK']:
lfn = result['Value']
if lfn:
lfnDict[lfn] = jobID
else:
successful[jobID] = 'No oversized sandbox found'
else:
gLogger.warn( 'Error interrogating JobDB: %s' % result['Message'] )
if not lfnDict:
return S_OK( {'Successful':successful, 'Failed':failed} )
# Schedule removal of the LFNs now
for lfn, jobID in lfnDict.items():
result = self.jobDB.getJobAttributes( jobID, ['OwnerDN', 'OwnerGroup'] )
if not result['OK']:
failed[jobID] = lfn
continue
if not result['Value']:
failed[jobID] = lfn
continue
ownerDN = result['Value']['OwnerDN']
ownerGroup = result['Value']['OwnerGroup']
result = self.__setRemovalRequest( lfn, ownerDN, ownerGroup )
if not result['OK']:
failed[jobID] = lfn
else:
successful[jobID] = lfn
result = {'Successful':successful, 'Failed':failed}
return S_OK( result )
def __setRemovalRequest( self, lfn, ownerDN, ownerGroup ):
""" Set removal request with the given credentials
"""
oRequest = Request()
oRequest.OwnerDN = ownerDN
oRequest.OwnerGroup = ownerGroup
oRequest.RequestName = os.path.basename( lfn ).strip() + '_removal_request.xml'
oRequest.SourceComponent = 'JobCleaningAgent'
removeFile = Operation()
removeFile.Type = 'RemoveFile'
removedFile = File()
removedFile.LFN = lfn
removeFile.addFile( removedFile )
oRequest.addOperation( removeFile )
return ReqClient().putRequest( oRequest )
|
Sbalbp/DIRAC
|
WorkloadManagementSystem/Agent/JobCleaningAgent.py
|
Python
|
gpl-3.0
| 10,095
|
[
"DIRAC"
] |
1e3fb7edec9a6815ab1145a8b29f7be80881235b57e799bc09d02d30d1ed51a0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#vim: tabstop=4 shiftwidth=4 softtabstop=4
#Created on 2013-6-24
#Copyright 2013 nuoqingyun xuqifeng
from bson.code import Code
traffic_map = Code("function () {"
"emit(this.domain, this.bytes);"
"}")
traffic_reduce = Code("function (key, values) {"
" var sum = 0;"
" var count = 0;"
" values.forEach(function(byte){"
" sum += byte;"
" count ++;"
"});"
" return {'sum':sum, 'count':count};"
"}")
traffic_reduce1 = Code("function (keyDomain, valuesBytes) {"
" return Array.sum(valuesBytes);"
"}")
traffic_map_test = Code("function () {"
"emit(this.domain, {bytes:this.bytes, visit:1, hits:this.code});"
"}")
traffic_reduce_test = Code("function (key, values) {"
" var sum = 0;"
" var count = 0;"
" var visits = 0;"
" values.forEach(function(vals){"
" sum += vals.bytes;"
" count += vals.hits;"
" visits += vals.visit;"
"});"
" return {bytes:sum, visit:visits, hits:count};"
"}")
|
homhei/glance
|
glance/db/js.py
|
Python
|
apache-2.0
| 1,449
|
[
"VisIt"
] |
c9e0d407c06b574f446d725d017667ea85d6e3300800b934a0b9245bf862a7cb
|
# encoding: utf-8
"""
A base class for a configurable application.
Authors:
* Brian Granger
* Min RK
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import logging
import os
import re
import sys
from copy import deepcopy
from collections import defaultdict
from IPython.external.decorator import decorator
from IPython.config.configurable import SingletonConfigurable
from IPython.config.loader import (
KVArgParseConfigLoader, PyFileConfigLoader, Config, ArgumentError, ConfigFileNotFound, JSONFileConfigLoader
)
from IPython.utils.traitlets import (
Unicode, List, Enum, Dict, Instance, TraitError
)
from IPython.utils.importstring import import_item
from IPython.utils.text import indent, wrap_paragraphs, dedent
from IPython.utils import py3compat
from IPython.utils.py3compat import string_types, iteritems
#-----------------------------------------------------------------------------
# function for re-wrapping a helpstring
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Descriptions for the various sections
#-----------------------------------------------------------------------------
# merge flags&aliases into options
option_description = """
Arguments that take values are actually convenience aliases to full
Configurables, whose aliases are listed on the help line. For more information
on full configurables, see '--help-all'.
""".strip() # trim newlines of front and back
keyvalue_description = """
Parameters are set from command-line arguments of the form:
`--Class.trait=value`.
This line is evaluated in Python, so simple expressions are allowed, e.g.::
`--C.a='range(3)'` For setting C.a=[0,1,2].
""".strip() # trim newlines of front and back
# sys.argv can be missing, for example when python is embedded. See the docs
# for details: http://docs.python.org/2/c-api/intro.html#embedding-python
if not hasattr(sys, "argv"):
sys.argv = [""]
subcommand_description = """
Subcommands are launched as `{app} cmd [args]`. For information on using
subcommand 'cmd', do: `{app} cmd -h`.
""".strip().format(app=os.path.basename(sys.argv[0]))
# get running program name
#-----------------------------------------------------------------------------
# Application class
#-----------------------------------------------------------------------------
@decorator
def catch_config_error(method, app, *args, **kwargs):
"""Method decorator for catching invalid config (Trait/ArgumentErrors) during init.
On a TraitError (generally caused by bad config), this will print the trait's
message, and exit the app.
For use on init methods, to prevent invoking excepthook on invalid input.
"""
try:
return method(app, *args, **kwargs)
except (TraitError, ArgumentError) as e:
app.print_help()
app.log.fatal("Bad config encountered during initialization:")
app.log.fatal(str(e))
app.log.debug("Config at the time: %s", app.config)
app.exit(1)
class ApplicationError(Exception):
pass
class LevelFormatter(logging.Formatter):
"""Formatter with additional `highlevel` record
This field is empty if log level is less than highlevel_limit,
otherwise it is formatted with self.highlevel_format.
Useful for adding 'WARNING' to warning messages,
without adding 'INFO' to info, etc.
"""
highlevel_limit = logging.WARN
highlevel_format = " %(levelname)s |"
def format(self, record):
if record.levelno >= self.highlevel_limit:
record.highlevel = self.highlevel_format % record.__dict__
else:
record.highlevel = ""
return super(LevelFormatter, self).format(record)
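# Editor's illustration (hypothetical helper, not part of IPython): wiring LevelFormatter
# to a stdlib handler.  Records at WARNING and above get the " WARNING |" prefix, while
# INFO/DEBUG records keep an empty ``highlevel`` field, as described in the docstring.
def _level_formatter_demo():
    demo_log = logging.getLogger("level_formatter_demo")
    handler = logging.StreamHandler()
    handler.setFormatter(LevelFormatter("[%(name)s]%(highlevel)s %(message)s"))
    demo_log.addHandler(handler)
    demo_log.setLevel(logging.DEBUG)
    demo_log.info("plain info line")    # -> [level_formatter_demo] plain info line
    demo_log.warning("something odd")   # -> [level_formatter_demo] WARNING | something odd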
class Application(SingletonConfigurable):
"""A singleton application with full configuration support."""
# The name of the application, will usually match the name of the command
# line application
name = Unicode(u'application')
# The description of the application that is printed at the beginning
# of the help.
description = Unicode(u'This is an application.')
# default section descriptions
option_description = Unicode(option_description)
keyvalue_description = Unicode(keyvalue_description)
subcommand_description = Unicode(subcommand_description)
# The usage and example string that goes at the end of the help string.
examples = Unicode()
# A sequence of Configurable subclasses whose config=True attributes will
# be exposed at the command line.
classes = List([])
# The version string of this application.
version = Unicode(u'0.0')
# the argv used to initialize the application
argv = List()
# The log level for the application
log_level = Enum((0,10,20,30,40,50,'DEBUG','INFO','WARN','ERROR','CRITICAL'),
default_value=logging.WARN,
config=True,
help="Set the log level by value or name.")
def _log_level_changed(self, name, old, new):
"""Adjust the log level when log_level is set."""
if isinstance(new, string_types):
new = getattr(logging, new)
self.log_level = new
self.log.setLevel(new)
log_datefmt = Unicode("%Y-%m-%d %H:%M:%S", config=True,
help="The date format used by logging formatters for %(asctime)s"
)
def _log_datefmt_changed(self, name, old, new):
self._log_format_changed()
log_format = Unicode("[%(name)s]%(highlevel)s %(message)s", config=True,
help="The Logging format template",
)
def _log_format_changed(self, name, old, new):
"""Change the log formatter when log_format is set."""
_log_handler = self.log.handlers[0]
_log_formatter = LevelFormatter(new, datefmt=self.log_datefmt)
_log_handler.setFormatter(_log_formatter)
log = Instance(logging.Logger)
def _log_default(self):
"""Start logging for this application.
The default is to log to stderr using a StreamHandler, if no default
handler already exists. The log level starts at logging.WARN, but this
can be adjusted by setting the ``log_level`` attribute.
"""
log = logging.getLogger(self.__class__.__name__)
log.setLevel(self.log_level)
log.propagate = False
_log = log # copied from Logger.hasHandlers() (new in Python 3.2)
while _log:
if _log.handlers:
return log
if not _log.propagate:
break
else:
_log = _log.parent
if sys.executable.endswith('pythonw.exe'):
# this should really go to a file, but file-logging is only
# hooked up in parallel applications
_log_handler = logging.StreamHandler(open(os.devnull, 'w'))
else:
_log_handler = logging.StreamHandler()
_log_formatter = LevelFormatter(self.log_format, datefmt=self.log_datefmt)
_log_handler.setFormatter(_log_formatter)
log.addHandler(_log_handler)
return log
# the alias map for configurables
aliases = Dict({'log-level' : 'Application.log_level'})
# flags for loading Configurables or store_const style flags
# flags are loaded from this dict by '--key' flags
# this must be a dict of two-tuples, the first element being the Config/dict
# and the second being the help string for the flag
flags = Dict()
def _flags_changed(self, name, old, new):
"""ensure flags dict is valid"""
for key,value in iteritems(new):
assert len(value) == 2, "Bad flag: %r:%s"%(key,value)
assert isinstance(value[0], (dict, Config)), "Bad flag: %r:%s"%(key,value)
assert isinstance(value[1], string_types), "Bad flag: %r:%s"%(key,value)
# subcommands for launching other applications
# if this is not empty, this will be a parent Application
# this must be a dict of two-tuples,
# the first element being the application class/import string
# and the second being the help string for the subcommand
subcommands = Dict()
# parse_command_line will initialize a subapp, if requested
subapp = Instance('IPython.config.application.Application', allow_none=True)
# extra command-line arguments that don't set config values
extra_args = List(Unicode)
def __init__(self, **kwargs):
SingletonConfigurable.__init__(self, **kwargs)
# Ensure my class is in self.classes, so my attributes appear in command line
# options and config files.
if self.__class__ not in self.classes:
self.classes.insert(0, self.__class__)
def _config_changed(self, name, old, new):
SingletonConfigurable._config_changed(self, name, old, new)
self.log.debug('Config changed:')
self.log.debug(repr(new))
@catch_config_error
def initialize(self, argv=None):
"""Do the basic steps to configure me.
Override in subclasses.
"""
self.parse_command_line(argv)
def start(self):
"""Start the app mainloop.
Override in subclasses.
"""
if self.subapp is not None:
return self.subapp.start()
def print_alias_help(self):
"""Print the alias part of the help."""
if not self.aliases:
return
lines = []
classdict = {}
for cls in self.classes:
# include all parents (up to, but excluding Configurable) in available names
for c in cls.mro()[:-3]:
classdict[c.__name__] = c
for alias, longname in iteritems(self.aliases):
classname, traitname = longname.split('.',1)
cls = classdict[classname]
trait = cls.class_traits(config=True)[traitname]
help = cls.class_get_trait_help(trait).splitlines()
# reformat first line
help[0] = help[0].replace(longname, alias) + ' (%s)'%longname
if len(alias) == 1:
help[0] = help[0].replace('--%s='%alias, '-%s '%alias)
lines.extend(help)
# lines.append('')
print(os.linesep.join(lines))
def print_flag_help(self):
"""Print the flag part of the help."""
if not self.flags:
return
lines = []
for m, (cfg,help) in iteritems(self.flags):
prefix = '--' if len(m) > 1 else '-'
lines.append(prefix+m)
lines.append(indent(dedent(help.strip())))
# lines.append('')
print(os.linesep.join(lines))
def print_options(self):
if not self.flags and not self.aliases:
return
lines = ['Options']
lines.append('-'*len(lines[0]))
lines.append('')
for p in wrap_paragraphs(self.option_description):
lines.append(p)
lines.append('')
print(os.linesep.join(lines))
self.print_flag_help()
self.print_alias_help()
print()
def print_subcommands(self):
"""Print the subcommand part of the help."""
if not self.subcommands:
return
lines = ["Subcommands"]
lines.append('-'*len(lines[0]))
lines.append('')
for p in wrap_paragraphs(self.subcommand_description):
lines.append(p)
lines.append('')
for subc, (cls, help) in iteritems(self.subcommands):
lines.append(subc)
if help:
lines.append(indent(dedent(help.strip())))
lines.append('')
print(os.linesep.join(lines))
def print_help(self, classes=False):
"""Print the help for each Configurable class in self.classes.
If classes=False (the default), only flags and aliases are printed.
"""
self.print_description()
self.print_subcommands()
self.print_options()
if classes:
if self.classes:
print("Class parameters")
print("----------------")
print()
for p in wrap_paragraphs(self.keyvalue_description):
print(p)
print()
for cls in self.classes:
cls.class_print_help()
print()
else:
print("To see all available configurables, use `--help-all`")
print()
self.print_examples()
def print_description(self):
"""Print the application description."""
for p in wrap_paragraphs(self.description):
print(p)
print()
def print_examples(self):
"""Print usage and examples.
This usage string goes at the end of the command line help string
and should contain examples of the application's usage.
"""
if self.examples:
print("Examples")
print("--------")
print()
print(indent(dedent(self.examples.strip())))
print()
def print_version(self):
"""Print the version string."""
print(self.version)
def update_config(self, config):
"""Fire the traits events when the config is updated."""
# Save a copy of the current config.
newconfig = deepcopy(self.config)
# Merge the new config into the current one.
newconfig.merge(config)
# Save the combined config as self.config, which triggers the traits
# events.
self.config = newconfig
@catch_config_error
def initialize_subcommand(self, subc, argv=None):
"""Initialize a subcommand with argv."""
subapp,help = self.subcommands.get(subc)
if isinstance(subapp, string_types):
subapp = import_item(subapp)
# clear existing instances
self.__class__.clear_instance()
# instantiate
self.subapp = subapp.instance(config=self.config)
# and initialize subapp
self.subapp.initialize(argv)
def flatten_flags(self):
"""flatten flags and aliases, so cl-args override as expected.
This prevents issues such as an alias pointing to InteractiveShell,
        but a config file setting the same trait in TerminalInteractiveShell
getting inappropriate priority over the command-line arg.
Only aliases with exactly one descendent in the class list
will be promoted.
"""
        # build a tree of classes in our list that inherit from a particular class
# it will be a dict by parent classname of classes in our list
# that are descendents
mro_tree = defaultdict(list)
for cls in self.classes:
clsname = cls.__name__
for parent in cls.mro()[1:-3]:
# exclude cls itself and Configurable,HasTraits,object
mro_tree[parent.__name__].append(clsname)
# flatten aliases, which have the form:
# { 'alias' : 'Class.trait' }
aliases = {}
for alias, cls_trait in iteritems(self.aliases):
cls,trait = cls_trait.split('.',1)
children = mro_tree[cls]
if len(children) == 1:
# exactly one descendent, promote alias
cls = children[0]
aliases[alias] = '.'.join([cls,trait])
# flatten flags, which are of the form:
# { 'key' : ({'Cls' : {'trait' : value}}, 'help')}
flags = {}
for key, (flagdict, help) in iteritems(self.flags):
newflag = {}
for cls, subdict in iteritems(flagdict):
children = mro_tree[cls]
# exactly one descendent, promote flag section
if len(children) == 1:
cls = children[0]
newflag[cls] = subdict
flags[key] = (newflag, help)
return flags, aliases
@catch_config_error
def parse_command_line(self, argv=None):
"""Parse the command line arguments."""
argv = sys.argv[1:] if argv is None else argv
self.argv = [ py3compat.cast_unicode(arg) for arg in argv ]
if argv and argv[0] == 'help':
# turn `ipython help notebook` into `ipython notebook -h`
argv = argv[1:] + ['-h']
if self.subcommands and len(argv) > 0:
# we have subcommands, and one may have been specified
subc, subargv = argv[0], argv[1:]
if re.match(r'^\w(\-?\w)*$', subc) and subc in self.subcommands:
# it's a subcommand, and *not* a flag or class parameter
return self.initialize_subcommand(subc, subargv)
# Arguments after a '--' argument are for the script IPython may be
        # about to run, not IPython itself. For arguments parsed here (help and
# version), we want to only search the arguments up to the first
# occurrence of '--', which we're calling interpreted_argv.
try:
interpreted_argv = argv[:argv.index('--')]
except ValueError:
interpreted_argv = argv
if any(x in interpreted_argv for x in ('-h', '--help-all', '--help')):
self.print_help('--help-all' in interpreted_argv)
self.exit(0)
if '--version' in interpreted_argv or '-V' in interpreted_argv:
self.print_version()
self.exit(0)
# flatten flags&aliases, so cl-args get appropriate priority:
flags,aliases = self.flatten_flags()
loader = KVArgParseConfigLoader(argv=argv, aliases=aliases,
flags=flags, log=self.log)
config = loader.load_config()
self.update_config(config)
# store unparsed args in extra_args
self.extra_args = loader.extra_args
@classmethod
def _load_config_files(cls, basefilename, path=None, log=None):
"""Load config files (py,json) by filename and path.
yield each config object in turn.
"""
pyloader = PyFileConfigLoader(basefilename+'.py', path=path, log=log)
jsonloader = JSONFileConfigLoader(basefilename+'.json', path=path, log=log)
config = None
for loader in [pyloader, jsonloader]:
try:
config = loader.load_config()
except ConfigFileNotFound:
pass
except Exception:
# try to get the full filename, but it will be empty in the
                # unlikely event that the error was raised before filefind finished
filename = loader.full_filename or basefilename
# problem while running the file
if log:
log.error("Exception while loading config file %s",
filename, exc_info=True)
else:
if log:
log.debug("Loaded config file: %s", loader.full_filename)
if config:
yield config
        return  # PEP 479: raising StopIteration inside a generator is an error in Python 3.7+
@catch_config_error
def load_config_file(self, filename, path=None):
"""Load config files by filename and path."""
filename, ext = os.path.splitext(filename)
for config in self._load_config_files(filename, path=path, log=self.log):
self.update_config(config)
def generate_config_file(self):
"""generate default config file from Configurables"""
lines = ["# Configuration file for %s."%self.name]
lines.append('')
lines.append('c = get_config()')
lines.append('')
for cls in self.classes:
lines.append(cls.class_config_section())
return '\n'.join(lines)
def exit(self, exit_status=0):
self.log.debug("Exiting application: %s" % self.name)
sys.exit(exit_status)
@classmethod
def launch_instance(cls, argv=None, **kwargs):
"""Launch a global instance of this Application
If a global instance already exists, this reinitializes and starts it
"""
app = cls.instance(**kwargs)
app.initialize(argv)
app.start()
#-----------------------------------------------------------------------------
# utility functions, for convenience
#-----------------------------------------------------------------------------
def boolean_flag(name, configurable, set_help='', unset_help=''):
"""Helper for building basic --trait, --no-trait flags.
Parameters
----------
name : str
The name of the flag.
configurable : str
The 'Class.trait' string of the trait to be set/unset with the flag
set_help : unicode
help string for --name flag
unset_help : unicode
help string for --no-name flag
Returns
-------
cfg : dict
A dict with two keys: 'name', and 'no-name', for setting and unsetting
the trait, respectively.
"""
# default helpstrings
set_help = set_help or "set %s=True"%configurable
unset_help = unset_help or "set %s=False"%configurable
cls,trait = configurable.split('.')
setter = {cls : {trait : True}}
unsetter = {cls : {trait : False}}
return {name : (setter, set_help), 'no-'+name : (unsetter, unset_help)}
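# Editor's illustration (hypothetical helper): the dict produced by boolean_flag, here for
# a made-up ``MyApp.verbose`` trait.  Merging it into Application.flags gives the
# application matching --verbose / --no-verbose command-line switches.
def _boolean_flag_demo():
    flags = boolean_flag('verbose', 'MyApp.verbose',
                         "turn verbose output on", "turn verbose output off")
    # flags == {'verbose':    ({'MyApp': {'verbose': True}},  'turn verbose output on'),
    #           'no-verbose': ({'MyApp': {'verbose': False}}, 'turn verbose output off')}
    return flags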
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/IPython/config/application.py
|
Python
|
gpl-3.0
| 21,862
|
[
"Brian"
] |
3c1546f248c954f3b80d330cee047e8762496c969259bb6970fb81371f58e7b6
|
#
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of IGE - Outer Space.
#
# IGE - Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# IGE - Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IGE - Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import pygameui as ui
from osci.StarMapWidget import StarMapWidget
from osci import gdata, res, client, messages
from ige.ospace.Const import *
from NewMessageDlg import NewMessageDlg
from ConfirmDlg import ConfirmDlg
import time
from ige import log
import re
class MessagesDlg:
def __init__(self, app):
self.app = app
self.createUI()
#
self.selectedObjID = None
self.selectedForum = None
self.selectedTopic = None
self.selectedType = None
self.newMessageDlg = NewMessageDlg(app)
self.newMsgs = 0
self.confirmDlg = ConfirmDlg(app)
self.uignore = []
self.gignore = []
self.lignore = []
if gdata.config.ignore.universe:
self.uignore = gdata.config.ignore.universe.split(',')
if gdata.config.ignore.galaxy:
self.gignore = gdata.config.ignore.galaxy.split(',')
if gdata.config.ignore.local:
self.lignore = gdata.config.ignore.local.split(',')
def display(self):
self.show()
self.win.show()
# register for updates
if self not in gdata.updateDlgs:
gdata.updateDlgs.append(self)
def hide(self):
self.win.setStatus(_("Ready."))
self.win.hide()
# unregister updates
if self in gdata.updateDlgs:
gdata.updateDlgs.remove(self)
def update(self):
self.show()
def show(self, updateForum = 1):
self.newMsgs = 0
#
player = client.getPlayer()
objList = [player.oid]
objList.extend(player.galaxies)
objList.append(OID_UNIVERSE)
# show forums
items = []
colors = [gdata.sevColors[gdata.INFO], gdata.sevColors[gdata.MIN]]
# Inbox
msgs, new = self.getMsgsNumber(player.oid, "INBOX",'local')
self.newMsgs += new
spec = gdata.mailboxSpec[T_PLAYER, "INBOX"]
item = ui.Item(_(spec[0]), tObjID = player.oid, tForum = "INBOX",
tType = T_PLAYER, tMsgs = _("%d / %d") % (new, msgs), foreground = colors[new > 0])
items.append(item)
# Events
msgs, new = self.getMsgsNumber(player.oid, "EVENTS")
self.newMsgs += new
spec = gdata.mailboxSpec[T_PLAYER, "EVENTS"]
eventsItem = ui.Item(_(spec[0]), tObjID = player.oid, tForum = "EVENTS",
tType = T_PLAYER, tMsgs = _("%d / %d") % (new, msgs), foreground = colors[new > 0])
items.append(eventsItem)
# Outbox
msgs, new = self.getMsgsNumber(player.oid, "OUTBOX")
self.newMsgs += new
spec = gdata.mailboxSpec[T_PLAYER, "OUTBOX"]
item = ui.Item(_(spec[0]), tObjID = player.oid, tForum = "OUTBOX",
tType = T_PLAYER, tMsgs = _("%d / %d") % (new, msgs), foreground = colors[new > 0])
items.append(item)
# galaxies
for galaxyID in player.galaxies:
galaxy = client.get(galaxyID)
# folder
item = ui.Item(_("Galaxy %s") % galaxy.name, tObjID = OID_NONE, tForum = "", tMsgs = "", foreground = colors[0])
items.append(item)
# news
msgs, new = self.getMsgsNumber(galaxyID, "NEWS")
self.newMsgs += new
spec = gdata.mailboxSpec[T_GALAXY, "NEWS"]
item = ui.Item(" %s" % _(spec[0]), tObjID = galaxyID, tForum = "NEWS",
tType = T_GALAXY, tMsgs = _("%d / %d") % (new, msgs), foreground = colors[new > 0])
items.append(item)
# public
msgs, new = self.getMsgsNumber(galaxyID, "PUBLIC",'galaxy')
self.newMsgs += new
spec = gdata.mailboxSpec[T_GALAXY, "PUBLIC"]
item = ui.Item(" %s" % _(spec[0]), tObjID = galaxyID, tForum = "PUBLIC",
tType = T_GALAXY, tMsgs = _("%d / %d") % (new, msgs), foreground = colors[new > 0])
items.append(item)
# universe
item = ui.Item(_("English channels"), tObjID = OID_NONE, tForum = "", tMsgs = "", foreground = colors[0])
items.append(item)
# news
msgs, new = self.getMsgsNumber(OID_UNIVERSE, "NEWS")
self.newMsgs += new
spec = gdata.mailboxSpec[T_UNIVERSE, "NEWS"]
item = ui.Item(" %s" % _(spec[0]), tObjID = OID_UNIVERSE, tForum = "NEWS",
tType = T_UNIVERSE, tMsgs = _("%d / %d") % (new, msgs), foreground = colors[new > 0])
items.append(item)
# public
msgs, new = self.getMsgsNumber(OID_UNIVERSE, "PUBLIC",'universe')
self.newMsgs += new
spec = gdata.mailboxSpec[T_UNIVERSE, "PUBLIC"]
item = ui.Item(" %s" % _(spec[0]), tObjID = OID_UNIVERSE, tForum = "PUBLIC",
tType = T_UNIVERSE, tMsgs = _("%d / %d") % (new, msgs), foreground = colors[new > 0])
items.append(item)
# qa
msgs, new = self.getMsgsNumber(OID_UNIVERSE, "QA",'universe')
self.newMsgs += new
spec = gdata.mailboxSpec[T_UNIVERSE, "QA"]
item = ui.Item(" %s" % _(spec[0]), tObjID = OID_UNIVERSE, tForum = "QA",
tType = T_UNIVERSE, tMsgs = _("%d / %d") % (new, msgs), foreground = colors[new > 0])
items.append(item)
# ideas
msgs, new = self.getMsgsNumber(OID_UNIVERSE, "IDEAS",'universe')
self.newMsgs += new
spec = gdata.mailboxSpec[T_UNIVERSE, "IDEAS"]
item = ui.Item(" %s" % _(spec[0]), tObjID = OID_UNIVERSE, tForum = "IDEAS",
tType = T_UNIVERSE, tMsgs = _("%d / %d") % (new, msgs), foreground = colors[new > 0])
items.append(item)
# issues
msgs, new = self.getMsgsNumber(OID_UNIVERSE, "ISSUES",'universe')
self.newMsgs += new
spec = gdata.mailboxSpec[T_UNIVERSE, "ISSUES"]
item = ui.Item(" %s" % _(spec[0]), tObjID = OID_UNIVERSE, tForum = "ISSUES",
tType = T_UNIVERSE, tMsgs = _("%d / %d") % (new, msgs), foreground = colors[new > 0])
items.append(item)
# universe (CZ)
item = ui.Item(_("Czech channels"), tObjID = OID_NONE, tForum = "", tMsgs = "", foreground = colors[0])
items.append(item)
# public
msgs, new = self.getMsgsNumber(OID_UNIVERSE, "CZ_PUBLIC",'universe')
self.newMsgs += new
spec = gdata.mailboxSpec[T_UNIVERSE, "PUBLIC"]
item = ui.Item(" %s" % _(spec[0]), tObjID = OID_UNIVERSE, tForum = "CZ_PUBLIC",
tType = T_UNIVERSE, tMsgs = _("%d / %d") % (new, msgs), foreground = colors[new > 0])
items.append(item)
# qa
msgs, new = self.getMsgsNumber(OID_UNIVERSE, "CZ_QA",'universe')
self.newMsgs += new
spec = gdata.mailboxSpec[T_UNIVERSE, "QA"]
item = ui.Item(" %s" % _(spec[0]), tObjID = OID_UNIVERSE, tForum = "CZ_QA",
tType = T_UNIVERSE, tMsgs = _("%d / %d") % (new, msgs), foreground = colors[new > 0])
items.append(item)
# ideas
msgs, new = self.getMsgsNumber(OID_UNIVERSE, "CZ_IDEAS",'universe')
self.newMsgs += new
spec = gdata.mailboxSpec[T_UNIVERSE, "IDEAS"]
item = ui.Item(" %s" % _(spec[0]), tObjID = OID_UNIVERSE, tForum = "CZ_IDEAS",
tType = T_UNIVERSE, tMsgs = _("%d / %d") % (new, msgs), foreground = colors[new > 0])
items.append(item)
#
self.win.vForums.items = items
self.win.vForums.itemsChanged()
# find item to select
found = 0
for item in items:
if item.tObjID == self.selectedObjID and item.tForum == self.selectedForum:
self.win.vForums.selectItem(item)
found = 1
break
if not found:
self.win.vForums.selectItem(eventsItem)
# display forum
if updateForum:
self.onForumSelected(None, None, None)
# update msg button
gdata.mainGameDlg.updateMsgButton()
def getMsgsNumber(self, objID, forum, forumtype='none'):
try:
messages = client.get(objID)._messages
except AttributeError:
log.debug("Suplying default messages for object", objID, forum)
messages = {}
new = 0
msgs = 0
for messageID in messages:
message = messages[messageID]
if forumtype == 'universe':
if message["sender"] in self.uignore:
continue
elif forumtype == 'galaxy':
if message["sender"] in self.gignore:
continue
elif forumtype == 'local':
if message["sender"] in self.lignore:
continue
if message["forum"] == forum:
if message["readed"] == 0:
new += 1
msgs += 1
return msgs, new
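# Example (illustrative): self.getMsgsNumber(player.oid, "INBOX", 'local')
# returns a (total, unread) tuple for that mailbox, skipping any message whose
# sender appears on the corresponding ignore list (self.lignore in this case).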
def onForumSelected(self, widget, action, data):
selItem = self.win.vForums.selection[0]
if selItem.tObjID == OID_NONE:
# select last valid
for item in self.win.vForums.items:
if item.tObjID == self.selectedObjID and \
item.tForum == self.selectedForum:
self.win.vForums.selectItem(item)
break
return
self.selectedObjID = selItem.tObjID
self.selectedForum = selItem.tForum
self.selectedType = selItem.tType
self.win.vNewTopic.enabled = 1
self.win.vAllReaded.enabled = 1
self.win.vDeleteAll.enabled = 1
player = client.getPlayer()
playerid = player.oid
objMessages = client.get(selItem.tObjID)._messages
ids = sorted(objMessages.keys(), reverse=True)
topics = []
items = []
for messageID in ids:
message = objMessages[messageID]
if message["forum"] == selItem.tForum:
# ignore by universe, local (private messages), or galaxy
if selItem.tObjID == OID_UNIVERSE:
if message["sender"] in self.uignore:
continue
elif selItem.tObjID == playerid:
if message["sender"] in self.lignore:
continue
else:
if message["sender"] in self.gignore:
continue
# regenerate topics for messages with data
if message.has_key("data") and message["topic"] == "EVENT":
sourceID, msgID, locationID, turn, data = message["data"]
message["topic"] = messages.getMsgText(msgID, data).split("\n")[0]
topic = message["topic"]
item = ui.Item(topic)
if topic not in topics:
topics.append(topic)
#item = ui.Item(topic, tObjID = selItem.tObjID, tForum = selItem.tForum,
# tMgsId = -1, font = "normal-bold")
#items.append(item)
if message.has_key("data"):
sourceID, msgID, locationID, turn, data = message["data"]
item.tLocationID = locationID
item.tSourceID = sourceID
obj = client.get(sourceID, noUpdate = 1, publicOnly = 1) #Custom name in "sender"
if hasattr(obj,'customname') and obj.customname:
item.tSender = _('"%s"') % obj.customname
else:
item.tSender = message["sender"]
foreground = gdata.sevColors[messages.getMsgSeverity(msgID)]
if message["readed"]:
state = _(" ")
else:
state = _("N")
else:
item.tSender = message["sender"]
if message["readed"]:
foreground = gdata.sevColors[gdata.NONE]
if message["replied"]:
state = _("R")
else:
state = _(" ")
else:
foreground = gdata.sevColors[gdata.MIN]
state = _("N")
date = time.strftime(_("%m-%d %H:%M"), time.localtime(message["time"]))
#if date == time.strftime(_("%Y-%m-%d"), time.localtime()):
# date = time.strftime(_("%H:%M"), time.localtime(message["time"]))
item.tObjID = selItem.tObjID
item.tForum = selItem.tForum
item.tMsgID = messageID
item.foreground = foreground
item.tState = state
item.tDate = date
item.tDate_raw = message["time"]
item.tType = selItem.tType
items.append(item)
self.win.vMessages.items = items
self.win.vMessages.itemsChanged()
if self.selectedForum != "EVENTS":
# reset messages scrollbar position
self.win.vMessages.bar.slider.position = 0
self.win.vMessages.itemsChanged()
self.win.vMessage.text = [""]
self.win.vReply.enabled = 0
self.win.vNewTopic.enabled = gdata.mailboxSpec[selItem.tType, gdata.mailboxStripLang(selItem.tForum)][1] is not None
self.win.vDelete.enabled = len(self.win.vMessages.selection) > 0
def onMessageSelected(self, widget, action, data):
selItem = self.win.vMessages.selection[0]
selItem.tState = " "
message = client.get(selItem.tObjID)._messages[selItem.tMsgID]
if "data" not in message:
selItem.foreground = gdata.sevColors[gdata.NONE]
self.win.vMessages.itemsChanged()
message["readed"] = 1
text = []
if message.has_key("data"):
text = messages.getFullMessageText(message).split("\n")
if message.has_key("text"):
text.extend(message["text"].split("\n"))
self.win.vMessage.text = text
self.win.vMessage.offsetRow = 0
self.win.vMessage.vertScrollbar.slider.position = 0
self.win.vReply.enabled = gdata.mailboxSpec[selItem.tType, gdata.mailboxStripLang(selItem.tForum)][1] is not None
self.win.vDelete.enabled = 1
self.show(updateForum = 0)
def onNewTopic(self, widget, action, data):
self.newMessageDlg.display(self, self.selectedObjID, self.selectedType, self.selectedForum)
def onReply(self, widget, action, data):
selItem = self.win.vMessages.selection[0]
topic = client.get(selItem.tObjID)._messages[selItem.tMsgID]["topic"]
self.newMessageDlg.display(self, self.selectedObjID, self.selectedType, self.selectedForum, selItem.tMsgID)
def onRefresh(self, widget, action, data):
client.getMessages()
self.update()
def onDelete(self, widget, action, data):
selItem = self.win.vMessages.selection[0]
del client.get(selItem.tObjID)._messages[selItem.tMsgID]
self.update()
def onDeleteAll(self, widget, action, data):
self.confirmDlg.display(_("Delete all messages in this mailbox?"),
_("Yes"), _("No"), self.onDeleteAllConfirmed)
def onDeleteAllConfirmed(self):
obj = client.get(self.selectedObjID)
# must make a list copy (we delete from the dictionary while iterating)
for messageID in list(obj._messages.keys()):
message = obj._messages[messageID]
if message["forum"] == self.selectedForum:
del obj._messages[messageID]
self.update()
def onAllReaded(self, widget, action, data):
obj = client.get(self.selectedObjID)
for messageID in obj._messages:
message = obj._messages[messageID]
if message["forum"] == self.selectedForum:
message["readed"] = 1
self.update()
def onPostMenu(self, widget, action, data):
selItem = self.win.vMessages.selection[0]
if hasattr(selItem, "tLocationID"):
self.eventPopup.show()
def onShowLoc(self, widget, action, data):
item = self.win.vMessages.selection[0]
if item.tLocationID != OID_NONE:
obj = client.get(item.tLocationID, noUpdate = 1)
if hasattr(obj, "x"):
gdata.mainGameDlg.win.vStarMap.highlightPos = (obj.x, obj.y)
gdata.mainGameDlg.win.vStarMap.setPos(obj.x, obj.y)
self.hide()
return
if item.tForum == "EVENTS" and re.match(('^%s(.*)')%(_('Research completed:')),item.text):
gdata.mainGameDlg.researchDlg.display()
return
self.win.setStatus(_("Cannot show location"))
def onShowLocDel(self, widget, action, data):
self.onShowLoc(widget, action, data)
self.onDelete(widget, action, data)
def onShowSource(self, widget, action, data):
item = self.win.vMessages.selection[0]
if item.tForum == "EVENTS" and re.match(('^%s(.*)')%(_('Research completed:')),item.text):
gdata.mainGameDlg.researchDlg.display()
return
if item.tSourceID != OID_NONE:
gdata.mainGameDlg.onSelectMapObj(None, None, item.tSourceID)
#self.hide()
return
self.win.setStatus(_("Cannot show location"))
def onShowSourceDel(self, widget, action, data):
self.onShowSource(widget, action, data)
self.onDelete(widget, action, data)
def onClose(self, widget, action, data):
self.hide()
def createUI(self):
w, h = gdata.scrnSize
self.win = ui.Window(self.app,
modal = 1,
escKeyClose = 1,
titleOnly = w == 800 and h == 600,
movable = 0,
title = _("Messages and events"),
rect = ui.Rect((w - 800 - 4 * (w != 800)) / 2, (h - 600 - 4 * (h != 600)) / 2, 800 + 4 * (w != 800), 580 + 4 * (h != 600)),
layoutManager = ui.SimpleGridLM(),
)
self.win.subscribeAction('*', self)
# forums
ui.Listbox(self.win, layout = (0, 0, 10, 27), id = "vForums",
columns = ((_("Channel"), "text", 5.5, ui.ALIGN_W), (_("#"), "tMsgs", 4.5, ui.ALIGN_E)),
columnLabels = 1, action = "onForumSelected", sortable = 0)
# topics
ui.Listbox(self.win, layout = (10, 0, 30, 15), id = "vMessages",
columns = (
(_(" "), "tState", 1, ui.ALIGN_NONE),
(_("Date"), "tDate", 4, ui.ALIGN_W),
(_("Sender"), "tSender", 7, ui.ALIGN_W),
(_("Subject"), "text", 0, ui.ALIGN_W),
),
columnLabels = 1, action = "onMessageSelected", rmbAction = "onPostMenu")
# messages
ui.Title(self.win, layout = (10, 15, 5, 1),
font = "normal-bold", align = ui.ALIGN_W)
ui.Button(self.win, layout = (15, 15, 5, 1), text = _("New subject"),
action = "onNewTopic", id = "vNewTopic", enabled = 0)
ui.Button(self.win, layout = (20, 15, 5, 1), text = _("Reply"),
action = "onReply", id = "vReply", enabled = 0)
ui.Button(self.win, layout = (25, 15, 5, 1), text = _("Read all"),
action = "onAllReaded", id = "vAllReaded", enabled = 0)
ui.Button(self.win, layout = (30, 15, 5, 1), text = _("Delete"),
action = "onDelete", id = "vDelete", enabled = 0)
ui.Button(self.win, layout = (35, 15, 5, 1), text = _("Delete all"),
action = "onDeleteAll", id = "vDeleteAll", enabled = 0)
s = ui.Scrollbar(self.win, layout = (39, 16, 1, 11))
t = ui.Text(self.win, layout = (10, 16, 29, 11), id = "vMessage", editable = 0)
t.attachVScrollbar(s)
# status bar
ui.TitleButton(self.win, layout = (30, 27, 5, 1), text = _('Refresh'), action = 'onRefresh')
ui.TitleButton(self.win, layout = (35, 27, 5, 1), text = _('Close'), action = 'onClose')
ui.Title(self.win, id = 'vStatusBar', layout = (0, 27, 30, 1), align = ui.ALIGN_W)
#@self.win.statusBar = self.win.vStatusBar
# event menu
self.eventPopup = ui.Menu(self.app, title = _("Message actions"),
items = [
ui.Item(_("Show location"), action = "onShowLoc"),
ui.Item(_("Show source"), action = "onShowSource"),
ui.Item(_("Show location and delete msg"), action = "onShowLocDel"),
ui.Item(_("Show source and delete msg"), action = "onShowSourceDel"),
ui.Item(_("Delete"), action = "onDelete"),
]
)
self.eventPopup.subscribeAction("*", self)
| Lukc/ospace-lukc | client-pygame/lib/osci/dialog/MessagesDlg.py | Python | gpl-2.0 | 17,967 | ["Galaxy"] | 6b0a49c5ca9aee35574cf3ff20a940420d055e5b83eb3d63bd846a577753073a |
# Copyright (C) 2004-2009 Paul Cochrane
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
## @file camera.py
"""
Class and functions associated with a pyvisi Camera object
"""
# generic imports
from pyvisi.common import debugMsg, overrideWarning
from pyvisi.item import Item
__revision__ = '$Revision$'
class Camera(Item):
"""
Camera class
"""
def __init__(self, scene):
"""
Initialisation of the Camera object
@param scene: The Scene object to add the Camera object to
@type scene: Scene object
"""
Item.__init__(self)
debugMsg("Called Camera.__init__()")
# default x,y,z positions of Camera (specific to vtk)
self.xPos = 0.0
self.yPos = 0.0
self.zPos = 3.0
# default x,y,z positions of the Camera's focal point (specific to vtk)
self.xFocalPoint = 0.0
self.yFocalPoint = 0.0
self.zFocalPoint = 0.0
# default elevation and azimuth
self.elevation = 30 ####### we should try for matlab defaults
self.azimuth = 30
# keep a reference to the renderer so we can send stuff to it
self.renderer = scene.renderer
# some vtk initialisation commands
self.renderer.runString("# Camera.__init__()\n")
# initialise the position of the Camera
self.setPosition(self.xPos, self.yPos, self.zPos)
self.setFocalPoint(self.xFocalPoint, self.yFocalPoint, self.zFocalPoint)
def setPosition(self, *pos):
"""
Set position of camera within scene
@param pos: Position to set camera in terms of x,y,z coordinates
@type pos: tuple
"""
debugMsg("Called Camera.setPosition()")
# I need to do some mucking around in here with coordinate systems
# and so on, but at present, we'll just use vtk's coord system
self.xPos = pos[0]
self.yPos = pos[1]
self.zPos = pos[2]
# print a warning message if get to here
overrideWarning("Camera.setPosition")
# now to set the position
return
def getPosition(self):
"""
Get the position of Camera within Scene
Returns the position in a tuple of form (xPos, yPos, zPos)
"""
debugMsg("Called Camera.getPosition()")
return (self.xPos, self.yPos, self.zPos)
def setFocalPoint(self, *pos):
"""
Sets the focal point of the Camera within the Scene
@param pos: Position to set the focal point
@type pos: tuple
"""
debugMsg("Called Camera.setFocalPoint()")
# print a warning message if get to here
overrideWarning("Camera.setFocalPoint")
# I need to do some mucking around in here with coordinate systems
# and so on, but at present, we'll just use vtk's coord system
self.xFocalPoint = pos[0]
self.yFocalPoint = pos[1]
self.zFocalPoint = pos[2]
# now set the focal point position
self.renderer.runString("#Camera.setFocalPoint()\n")
return
def getFocalPoint(self):
"""
Get the position of the focal point of the Camera
Returns the position of the focal point in a tuple of form
(xPos, yPos, zPos)
"""
debugMsg("Called Camera.getFocalPoint()")
return (self.xFocalPoint, self.yFocalPoint, self.zFocalPoint)
def setElevation(self, elevation):
"""
Set the elevation angle (in degrees) of the Camera
@param elevation: The elevation angle (in degrees) of the Camera
@type elevation: float
"""
debugMsg("Called Camera.setElevation()")
self.elevation = elevation
# print a warning message if get to here
overrideWarning("Camera.setElevation")
return
def getElevation(self):
"""
Gets the elevation angle (in degrees) of the Camera
"""
debugMsg("Called Camera.getElevation()")
return self.elevation
def setAzimuth(self, azimuth):
"""
Set the azimuthal angle (in degrees) of the Camera
@param azimuth: The azimuthal angle (in degrees) of the Camera
@type azimuth: float
"""
debugMsg("Called Camera.setAzimuth()")
self.azimuth = azimuth
# print a warning message if get to here
overrideWarning("Camera.setAzimuth")
return
def getAzimuth(self):
"""
Get the azimuthal angle (in degrees) of the Camera
"""
debugMsg("Called Camera.getAzimuth()")
return self.azimuth
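# Usage sketch (illustrative only; the Scene import path below is an assumption
# and may differ in this package):
#
#     from pyvisi.scene import Scene
#     scene = Scene()
#     camera = Camera(scene)
#     camera.setPosition(0.0, 0.0, 5.0)
#     camera.setElevation(45)
#     print(camera.getPosition())   # -> (0.0, 0.0, 5.0)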
# vim: expandtab shiftwidth=4:
| paultcochrane/pyvisi | pyvisi/camera.py | Python | gpl-2.0 | 5,343 | ["VTK"] | 6be4aae38f47759c64a2ebf48d8c12804ea81c94204b4a11f6515eaa85120811 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.gaming_v1beta.services.game_server_clusters_service import (
GameServerClustersServiceAsyncClient,
)
from google.cloud.gaming_v1beta.services.game_server_clusters_service import (
GameServerClustersServiceClient,
)
from google.cloud.gaming_v1beta.services.game_server_clusters_service import pagers
from google.cloud.gaming_v1beta.services.game_server_clusters_service import transports
from google.cloud.gaming_v1beta.types import common
from google.cloud.gaming_v1beta.types import game_server_clusters
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert GameServerClustersServiceClient._get_default_mtls_endpoint(None) is None
assert (
GameServerClustersServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
GameServerClustersServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
GameServerClustersServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
GameServerClustersServiceClient._get_default_mtls_endpoint(
sandbox_mtls_endpoint
)
== sandbox_mtls_endpoint
)
assert (
GameServerClustersServiceClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize(
"client_class",
[GameServerClustersServiceClient, GameServerClustersServiceAsyncClient,],
)
def test_game_server_clusters_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "gameservices.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.GameServerClustersServiceGrpcTransport, "grpc"),
(transports.GameServerClustersServiceGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_game_server_clusters_service_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class",
[GameServerClustersServiceClient, GameServerClustersServiceAsyncClient,],
)
def test_game_server_clusters_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "gameservices.googleapis.com:443"
def test_game_server_clusters_service_client_get_transport_class():
transport = GameServerClustersServiceClient.get_transport_class()
available_transports = [
transports.GameServerClustersServiceGrpcTransport,
]
assert transport in available_transports
transport = GameServerClustersServiceClient.get_transport_class("grpc")
assert transport == transports.GameServerClustersServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
GameServerClustersServiceClient,
transports.GameServerClustersServiceGrpcTransport,
"grpc",
),
(
GameServerClustersServiceAsyncClient,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
GameServerClustersServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(GameServerClustersServiceClient),
)
@mock.patch.object(
GameServerClustersServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(GameServerClustersServiceAsyncClient),
)
def test_game_server_clusters_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(
GameServerClustersServiceClient, "get_transport_class"
) as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(
GameServerClustersServiceClient, "get_transport_class"
) as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
GameServerClustersServiceClient,
transports.GameServerClustersServiceGrpcTransport,
"grpc",
"true",
),
(
GameServerClustersServiceAsyncClient,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
GameServerClustersServiceClient,
transports.GameServerClustersServiceGrpcTransport,
"grpc",
"false",
),
(
GameServerClustersServiceAsyncClient,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
GameServerClustersServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(GameServerClustersServiceClient),
)
@mock.patch.object(
GameServerClustersServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(GameServerClustersServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_game_server_clusters_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class",
[GameServerClustersServiceClient, GameServerClustersServiceAsyncClient],
)
@mock.patch.object(
GameServerClustersServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(GameServerClustersServiceClient),
)
@mock.patch.object(
GameServerClustersServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(GameServerClustersServiceAsyncClient),
)
def test_game_server_clusters_service_client_get_mtls_endpoint_and_cert_source(
client_class,
):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
GameServerClustersServiceClient,
transports.GameServerClustersServiceGrpcTransport,
"grpc",
),
(
GameServerClustersServiceAsyncClient,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_game_server_clusters_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
GameServerClustersServiceClient,
transports.GameServerClustersServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
GameServerClustersServiceAsyncClient,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_game_server_clusters_service_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_game_server_clusters_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.gaming_v1beta.services.game_server_clusters_service.transports.GameServerClustersServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = GameServerClustersServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
GameServerClustersServiceClient,
transports.GameServerClustersServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
GameServerClustersServiceAsyncClient,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_game_server_clusters_service_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"gameservices.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=None,
default_host="gameservices.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"request_type", [game_server_clusters.ListGameServerClustersRequest, dict,]
)
def test_list_game_server_clusters(request_type, transport: str = "grpc"):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = game_server_clusters.ListGameServerClustersResponse(
next_page_token="next_page_token_value", unreachable=["unreachable_value"],
)
response = client.list_game_server_clusters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.ListGameServerClustersRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListGameServerClustersPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
def test_list_game_server_clusters_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters), "__call__"
) as call:
client.list_game_server_clusters()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.ListGameServerClustersRequest()
@pytest.mark.asyncio
async def test_list_game_server_clusters_async(
transport: str = "grpc_asyncio",
request_type=game_server_clusters.ListGameServerClustersRequest,
):
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.ListGameServerClustersResponse(
next_page_token="next_page_token_value",
unreachable=["unreachable_value"],
)
)
response = await client.list_game_server_clusters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.ListGameServerClustersRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListGameServerClustersAsyncPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
@pytest.mark.asyncio
async def test_list_game_server_clusters_async_from_dict():
await test_list_game_server_clusters_async(request_type=dict)
def test_list_game_server_clusters_field_headers():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.ListGameServerClustersRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters), "__call__"
) as call:
call.return_value = game_server_clusters.ListGameServerClustersResponse()
client.list_game_server_clusters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_game_server_clusters_field_headers_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.ListGameServerClustersRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.ListGameServerClustersResponse()
)
await client.list_game_server_clusters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_game_server_clusters_flattened():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = game_server_clusters.ListGameServerClustersResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_game_server_clusters(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_game_server_clusters_flattened_error():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_game_server_clusters(
game_server_clusters.ListGameServerClustersRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_game_server_clusters_flattened_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = game_server_clusters.ListGameServerClustersResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.ListGameServerClustersResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_game_server_clusters(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_game_server_clusters_flattened_error_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_game_server_clusters(
game_server_clusters.ListGameServerClustersRequest(), parent="parent_value",
)
def test_list_game_server_clusters_pager(transport_name: str = "grpc"):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
],
next_page_token="abc",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[], next_page_token="def",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[game_server_clusters.GameServerCluster(),],
next_page_token="ghi",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_game_server_clusters(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(
isinstance(i, game_server_clusters.GameServerCluster) for i in results
)
def test_list_game_server_clusters_pages(transport_name: str = "grpc"):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
],
next_page_token="abc",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[], next_page_token="def",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[game_server_clusters.GameServerCluster(),],
next_page_token="ghi",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
],
),
RuntimeError,
)
pages = list(client.list_game_server_clusters(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_game_server_clusters_async_pager():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
],
next_page_token="abc",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[], next_page_token="def",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[game_server_clusters.GameServerCluster(),],
next_page_token="ghi",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
],
),
RuntimeError,
)
async_pager = await client.list_game_server_clusters(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, game_server_clusters.GameServerCluster) for i in responses
)
@pytest.mark.asyncio
async def test_list_game_server_clusters_async_pages():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
],
next_page_token="abc",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[], next_page_token="def",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[game_server_clusters.GameServerCluster(),],
next_page_token="ghi",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_game_server_clusters(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [game_server_clusters.GetGameServerClusterRequest, dict,]
)
def test_get_game_server_cluster(request_type, transport: str = "grpc"):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = game_server_clusters.GameServerCluster(
name="name_value", etag="etag_value", description="description_value",
)
response = client.get_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.GetGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, game_server_clusters.GameServerCluster)
assert response.name == "name_value"
assert response.etag == "etag_value"
assert response.description == "description_value"
def test_get_game_server_cluster_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_game_server_cluster), "__call__"
) as call:
client.get_game_server_cluster()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.GetGameServerClusterRequest()
@pytest.mark.asyncio
async def test_get_game_server_cluster_async(
transport: str = "grpc_asyncio",
request_type=game_server_clusters.GetGameServerClusterRequest,
):
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.GameServerCluster(
name="name_value", etag="etag_value", description="description_value",
)
)
response = await client.get_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.GetGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, game_server_clusters.GameServerCluster)
assert response.name == "name_value"
assert response.etag == "etag_value"
assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_game_server_cluster_async_from_dict():
await test_get_game_server_cluster_async(request_type=dict)
def test_get_game_server_cluster_field_headers():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.GetGameServerClusterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_game_server_cluster), "__call__"
) as call:
call.return_value = game_server_clusters.GameServerCluster()
client.get_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_game_server_cluster_field_headers_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.GetGameServerClusterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_game_server_cluster), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.GameServerCluster()
)
await client.get_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_game_server_cluster_flattened():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = game_server_clusters.GameServerCluster()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_game_server_cluster(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_game_server_cluster_flattened_error():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_game_server_cluster(
game_server_clusters.GetGameServerClusterRequest(), name="name_value",
)
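# A sketch of the two supported call forms (placeholders are hypothetical). The
# ValueError above enforces that a request object and flattened keyword
# arguments are mutually exclusive:
#
#     client.get_game_server_cluster(
#         request=game_server_clusters.GetGameServerClusterRequest(name="projects/p/...")
#     )
#     client.get_game_server_cluster(name="projects/p/...")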
@pytest.mark.asyncio
async def test_get_game_server_cluster_flattened_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.GameServerCluster()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_game_server_cluster(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_game_server_cluster_flattened_error_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_game_server_cluster(
game_server_clusters.GetGameServerClusterRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [game_server_clusters.CreateGameServerClusterRequest, dict,]
)
def test_create_game_server_cluster(request_type, transport: str = "grpc"):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.CreateGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
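# Illustrative sketch (not part of the generated tests): create, update and
# delete are long-running operations, so the client returns a future; a real
# caller typically blocks on it. The timeout below is a hypothetical value.
#
#     operation = client.create_game_server_cluster(request=create_request)
#     cluster = operation.result(timeout=300)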
def test_create_game_server_cluster_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_game_server_cluster), "__call__"
) as call:
client.create_game_server_cluster()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.CreateGameServerClusterRequest()
@pytest.mark.asyncio
async def test_create_game_server_cluster_async(
transport: str = "grpc_asyncio",
request_type=game_server_clusters.CreateGameServerClusterRequest,
):
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.CreateGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_game_server_cluster_async_from_dict():
await test_create_game_server_cluster_async(request_type=dict)
def test_create_game_server_cluster_field_headers():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.CreateGameServerClusterRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_game_server_cluster), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_game_server_cluster_field_headers_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.CreateGameServerClusterRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_game_server_cluster), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_game_server_cluster_flattened():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_game_server_cluster(
parent="parent_value",
game_server_cluster=game_server_clusters.GameServerCluster(
name="name_value"
),
game_server_cluster_id="game_server_cluster_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].game_server_cluster
mock_val = game_server_clusters.GameServerCluster(name="name_value")
assert arg == mock_val
arg = args[0].game_server_cluster_id
mock_val = "game_server_cluster_id_value"
assert arg == mock_val
def test_create_game_server_cluster_flattened_error():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_game_server_cluster(
game_server_clusters.CreateGameServerClusterRequest(),
parent="parent_value",
game_server_cluster=game_server_clusters.GameServerCluster(
name="name_value"
),
game_server_cluster_id="game_server_cluster_id_value",
)
@pytest.mark.asyncio
async def test_create_game_server_cluster_flattened_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_game_server_cluster(
parent="parent_value",
game_server_cluster=game_server_clusters.GameServerCluster(
name="name_value"
),
game_server_cluster_id="game_server_cluster_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].game_server_cluster
mock_val = game_server_clusters.GameServerCluster(name="name_value")
assert arg == mock_val
arg = args[0].game_server_cluster_id
mock_val = "game_server_cluster_id_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_game_server_cluster_flattened_error_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_game_server_cluster(
game_server_clusters.CreateGameServerClusterRequest(),
parent="parent_value",
game_server_cluster=game_server_clusters.GameServerCluster(
name="name_value"
),
game_server_cluster_id="game_server_cluster_id_value",
)
@pytest.mark.parametrize(
"request_type", [game_server_clusters.PreviewCreateGameServerClusterRequest, dict,]
)
def test_preview_create_game_server_cluster(request_type, transport: str = "grpc"):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_create_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = game_server_clusters.PreviewCreateGameServerClusterResponse(
etag="etag_value",
)
response = client.preview_create_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.PreviewCreateGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, game_server_clusters.PreviewCreateGameServerClusterResponse
)
assert response.etag == "etag_value"
def test_preview_create_game_server_cluster_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_create_game_server_cluster), "__call__"
) as call:
client.preview_create_game_server_cluster()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.PreviewCreateGameServerClusterRequest()
@pytest.mark.asyncio
async def test_preview_create_game_server_cluster_async(
transport: str = "grpc_asyncio",
request_type=game_server_clusters.PreviewCreateGameServerClusterRequest,
):
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_create_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.PreviewCreateGameServerClusterResponse(
etag="etag_value",
)
)
response = await client.preview_create_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.PreviewCreateGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, game_server_clusters.PreviewCreateGameServerClusterResponse
)
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_preview_create_game_server_cluster_async_from_dict():
await test_preview_create_game_server_cluster_async(request_type=dict)
def test_preview_create_game_server_cluster_field_headers():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.PreviewCreateGameServerClusterRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_create_game_server_cluster), "__call__"
) as call:
call.return_value = (
game_server_clusters.PreviewCreateGameServerClusterResponse()
)
client.preview_create_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_preview_create_game_server_cluster_field_headers_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.PreviewCreateGameServerClusterRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_create_game_server_cluster), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.PreviewCreateGameServerClusterResponse()
)
await client.preview_create_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.parametrize(
"request_type", [game_server_clusters.DeleteGameServerClusterRequest, dict,]
)
def test_delete_game_server_cluster(request_type, transport: str = "grpc"):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.DeleteGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_game_server_cluster_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_game_server_cluster), "__call__"
) as call:
client.delete_game_server_cluster()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.DeleteGameServerClusterRequest()
@pytest.mark.asyncio
async def test_delete_game_server_cluster_async(
transport: str = "grpc_asyncio",
request_type=game_server_clusters.DeleteGameServerClusterRequest,
):
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.DeleteGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_game_server_cluster_async_from_dict():
await test_delete_game_server_cluster_async(request_type=dict)
def test_delete_game_server_cluster_field_headers():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.DeleteGameServerClusterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_game_server_cluster), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_game_server_cluster_field_headers_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.DeleteGameServerClusterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_game_server_cluster), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_game_server_cluster_flattened():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_game_server_cluster(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_game_server_cluster_flattened_error():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_game_server_cluster(
game_server_clusters.DeleteGameServerClusterRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_game_server_cluster_flattened_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_game_server_cluster(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_game_server_cluster_flattened_error_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_game_server_cluster(
game_server_clusters.DeleteGameServerClusterRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [game_server_clusters.PreviewDeleteGameServerClusterRequest, dict,]
)
def test_preview_delete_game_server_cluster(request_type, transport: str = "grpc"):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_delete_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = game_server_clusters.PreviewDeleteGameServerClusterResponse(
etag="etag_value",
)
response = client.preview_delete_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.PreviewDeleteGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, game_server_clusters.PreviewDeleteGameServerClusterResponse
)
assert response.etag == "etag_value"
def test_preview_delete_game_server_cluster_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_delete_game_server_cluster), "__call__"
) as call:
client.preview_delete_game_server_cluster()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.PreviewDeleteGameServerClusterRequest()
@pytest.mark.asyncio
async def test_preview_delete_game_server_cluster_async(
transport: str = "grpc_asyncio",
request_type=game_server_clusters.PreviewDeleteGameServerClusterRequest,
):
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_delete_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.PreviewDeleteGameServerClusterResponse(
etag="etag_value",
)
)
response = await client.preview_delete_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.PreviewDeleteGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, game_server_clusters.PreviewDeleteGameServerClusterResponse
)
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_preview_delete_game_server_cluster_async_from_dict():
await test_preview_delete_game_server_cluster_async(request_type=dict)
def test_preview_delete_game_server_cluster_field_headers():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.PreviewDeleteGameServerClusterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_delete_game_server_cluster), "__call__"
) as call:
call.return_value = (
game_server_clusters.PreviewDeleteGameServerClusterResponse()
)
client.preview_delete_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_preview_delete_game_server_cluster_field_headers_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.PreviewDeleteGameServerClusterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_delete_game_server_cluster), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.PreviewDeleteGameServerClusterResponse()
)
await client.preview_delete_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.parametrize(
"request_type", [game_server_clusters.UpdateGameServerClusterRequest, dict,]
)
def test_update_game_server_cluster(request_type, transport: str = "grpc"):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.update_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.UpdateGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_update_game_server_cluster_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_game_server_cluster), "__call__"
) as call:
client.update_game_server_cluster()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.UpdateGameServerClusterRequest()
@pytest.mark.asyncio
async def test_update_game_server_cluster_async(
transport: str = "grpc_asyncio",
request_type=game_server_clusters.UpdateGameServerClusterRequest,
):
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.update_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.UpdateGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_game_server_cluster_async_from_dict():
await test_update_game_server_cluster_async(request_type=dict)
def test_update_game_server_cluster_field_headers():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.UpdateGameServerClusterRequest()
request.game_server_cluster.name = "game_server_cluster.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_game_server_cluster), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.update_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"game_server_cluster.name=game_server_cluster.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_game_server_cluster_field_headers_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.UpdateGameServerClusterRequest()
request.game_server_cluster.name = "game_server_cluster.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_game_server_cluster), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.update_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"game_server_cluster.name=game_server_cluster.name/value",
) in kw["metadata"]
def test_update_game_server_cluster_flattened():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_game_server_cluster(
game_server_cluster=game_server_clusters.GameServerCluster(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].game_server_cluster
mock_val = game_server_clusters.GameServerCluster(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_game_server_cluster_flattened_error():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_game_server_cluster(
game_server_clusters.UpdateGameServerClusterRequest(),
game_server_cluster=game_server_clusters.GameServerCluster(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_game_server_cluster_flattened_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_game_server_cluster(
game_server_cluster=game_server_clusters.GameServerCluster(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].game_server_cluster
mock_val = game_server_clusters.GameServerCluster(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_game_server_cluster_flattened_error_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_game_server_cluster(
game_server_clusters.UpdateGameServerClusterRequest(),
game_server_cluster=game_server_clusters.GameServerCluster(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize(
"request_type", [game_server_clusters.PreviewUpdateGameServerClusterRequest, dict,]
)
def test_preview_update_game_server_cluster(request_type, transport: str = "grpc"):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_update_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = game_server_clusters.PreviewUpdateGameServerClusterResponse(
etag="etag_value",
)
response = client.preview_update_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.PreviewUpdateGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, game_server_clusters.PreviewUpdateGameServerClusterResponse
)
assert response.etag == "etag_value"
def test_preview_update_game_server_cluster_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_update_game_server_cluster), "__call__"
) as call:
client.preview_update_game_server_cluster()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.PreviewUpdateGameServerClusterRequest()
@pytest.mark.asyncio
async def test_preview_update_game_server_cluster_async(
transport: str = "grpc_asyncio",
request_type=game_server_clusters.PreviewUpdateGameServerClusterRequest,
):
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_update_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.PreviewUpdateGameServerClusterResponse(
etag="etag_value",
)
)
response = await client.preview_update_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.PreviewUpdateGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, game_server_clusters.PreviewUpdateGameServerClusterResponse
)
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_preview_update_game_server_cluster_async_from_dict():
await test_preview_update_game_server_cluster_async(request_type=dict)
def test_preview_update_game_server_cluster_field_headers():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.PreviewUpdateGameServerClusterRequest()
request.game_server_cluster.name = "game_server_cluster.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_update_game_server_cluster), "__call__"
) as call:
call.return_value = (
game_server_clusters.PreviewUpdateGameServerClusterResponse()
)
client.preview_update_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"game_server_cluster.name=game_server_cluster.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_preview_update_game_server_cluster_field_headers_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.PreviewUpdateGameServerClusterRequest()
request.game_server_cluster.name = "game_server_cluster.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_update_game_server_cluster), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.PreviewUpdateGameServerClusterResponse()
)
await client.preview_update_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"game_server_cluster.name=game_server_cluster.name/value",
) in kw["metadata"]
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.GameServerClustersServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.GameServerClustersServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = GameServerClustersServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.GameServerClustersServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = GameServerClustersServiceClient(
client_options=options, transport=transport,
)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = GameServerClustersServiceClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.GameServerClustersServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = GameServerClustersServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.GameServerClustersServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = GameServerClustersServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.GameServerClustersServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.GameServerClustersServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.GameServerClustersServiceGrpcTransport,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport, transports.GameServerClustersServiceGrpcTransport,
)
def test_game_server_clusters_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.GameServerClustersServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_game_server_clusters_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.gaming_v1beta.services.game_server_clusters_service.transports.GameServerClustersServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.GameServerClustersServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_game_server_clusters",
"get_game_server_cluster",
"create_game_server_cluster",
"preview_create_game_server_cluster",
"delete_game_server_cluster",
"preview_delete_game_server_cluster",
"update_game_server_cluster",
"preview_update_game_server_cluster",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
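# The base transport is intentionally abstract: each RPC wrapper and the LRO
# operations_client property raise NotImplementedError until a concrete
# transport (gRPC or gRPC AsyncIO) overrides them, which is exactly what the
# loop and assertions above verify.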
def test_game_server_clusters_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.gaming_v1beta.services.game_server_clusters_service.transports.GameServerClustersServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.GameServerClustersServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_game_server_clusters_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.gaming_v1beta.services.game_server_clusters_service.transports.GameServerClustersServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.GameServerClustersServiceTransport()
adc.assert_called_once()
def test_game_server_clusters_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
GameServerClustersServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.GameServerClustersServiceGrpcTransport,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
],
)
def test_game_server_clusters_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.GameServerClustersServiceGrpcTransport, grpc_helpers),
(transports.GameServerClustersServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_game_server_clusters_service_transport_create_channel(
transport_class, grpc_helpers
):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"gameservices.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="gameservices.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
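# The -1 values asserted above lift gRPC's default message-size limits, so the
# channel accepts arbitrarily large request and response messages.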
@pytest.mark.parametrize(
"transport_class",
[
transports.GameServerClustersServiceGrpcTransport,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
],
)
def test_game_server_clusters_service_grpc_transport_client_cert_source_for_mtls(
transport_class,
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
    # Check that if ssl_channel_credentials is not provided, then
    # client_cert_source_for_mtls is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_game_server_clusters_service_host_no_port():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="gameservices.googleapis.com"
),
)
assert client.transport._host == "gameservices.googleapis.com:443"
def test_game_server_clusters_service_host_with_port():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="gameservices.googleapis.com:8000"
),
)
assert client.transport._host == "gameservices.googleapis.com:8000"
def test_game_server_clusters_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.GameServerClustersServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_game_server_clusters_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.GameServerClustersServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.GameServerClustersServiceGrpcTransport,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
],
)
def test_game_server_clusters_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.GameServerClustersServiceGrpcTransport,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
],
)
def test_game_server_clusters_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_game_server_clusters_service_grpc_lro_client():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_game_server_clusters_service_grpc_lro_async_client():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_game_server_cluster_path():
project = "squid"
location = "clam"
realm = "whelk"
cluster = "octopus"
expected = "projects/{project}/locations/{location}/realms/{realm}/gameServerClusters/{cluster}".format(
project=project, location=location, realm=realm, cluster=cluster,
)
actual = GameServerClustersServiceClient.game_server_cluster_path(
project, location, realm, cluster
)
assert expected == actual
def test_parse_game_server_cluster_path():
expected = {
"project": "oyster",
"location": "nudibranch",
"realm": "cuttlefish",
"cluster": "mussel",
}
path = GameServerClustersServiceClient.game_server_cluster_path(**expected)
# Check that the path construction is reversible.
actual = GameServerClustersServiceClient.parse_game_server_cluster_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "winkle"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = GameServerClustersServiceClient.common_billing_account_path(
billing_account
)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nautilus",
}
path = GameServerClustersServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = GameServerClustersServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "scallop"
expected = "folders/{folder}".format(folder=folder,)
actual = GameServerClustersServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "abalone",
}
path = GameServerClustersServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = GameServerClustersServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "squid"
expected = "organizations/{organization}".format(organization=organization,)
actual = GameServerClustersServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "clam",
}
path = GameServerClustersServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = GameServerClustersServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "whelk"
expected = "projects/{project}".format(project=project,)
actual = GameServerClustersServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "octopus",
}
path = GameServerClustersServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = GameServerClustersServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "oyster"
location = "nudibranch"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = GameServerClustersServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "cuttlefish",
"location": "mussel",
}
path = GameServerClustersServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = GameServerClustersServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.GameServerClustersServiceTransport, "_prep_wrapped_messages"
) as prep:
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.GameServerClustersServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = GameServerClustersServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(
GameServerClustersServiceClient,
transports.GameServerClustersServiceGrpcTransport,
),
(
GameServerClustersServiceAsyncClient,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-game-servers
|
tests/unit/gapic/gaming_v1beta/test_game_server_clusters_service.py
|
Python
|
apache-2.0
| 123,914
|
[
"Octopus"
] |
ee3e84267a4ca5f8f7e822e069430a9ec604f91a48d5813a8926aef7c27273e1
|
"""rbf - Radial basis functions for interpolation/smoothing scattered Nd data.
Written by John Travers <jtravs@gmail.com>, February 2007
Based closely on Matlab code by Alex Chirokov
Additional, large, improvements by Robert Hetland
Some additional alterations by Travis Oliphant
Permission to use, modify, and distribute this software is given under the
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Copyright (c) 2006-2007, Robert Hetland <hetland@tamu.edu>
Copyright (c) 2007, John Travers <jtravs@gmail.com>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Robert Hetland nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from scipy import linalg
from scipy._lib.six import callable, get_method_function, get_function_code
from scipy.special import xlogy
__all__ = ['Rbf']
class Rbf(object):
"""
Rbf(*args)
A class for radial basis function approximation/interpolation of
n-dimensional scattered data.
Parameters
----------
*args : arrays
x, y, z, ..., d, where x, y, z, ... are the coordinates of the nodes
and d is the array of values at the nodes
function : str or callable, optional
The radial basis function, based on the radius, r, given by the norm
(default is Euclidean distance); the default is 'multiquadric'::
'multiquadric': sqrt((r/self.epsilon)**2 + 1)
'inverse': 1.0/sqrt((r/self.epsilon)**2 + 1)
'gaussian': exp(-(r/self.epsilon)**2)
'linear': r
'cubic': r**3
'quintic': r**5
'thin_plate': r**2 * log(r)
If callable, then it must take 2 arguments (self, r). The epsilon
parameter will be available as self.epsilon. Other keyword
arguments passed in will be available as well.
epsilon : float, optional
Adjustable constant for gaussian or multiquadrics functions
- defaults to approximate average distance between nodes (which is
a good start).
smooth : float, optional
Values greater than zero increase the smoothness of the
approximation. 0 is for interpolation (default), the function will
always go through the nodal points in this case.
norm : callable, optional
A function that returns the 'distance' between two points, with
inputs as arrays of positions (x, y, z, ...), and an output as an
        array of distances. E.g., the default::
def euclidean_norm(x1, x2):
return sqrt( ((x1 - x2)**2).sum(axis=0) )
which is called with x1=x1[ndims,newaxis,:] and
x2=x2[ndims,:,newaxis] such that the result is a matrix of the
distances from each point in x1 to each point in x2.
Examples
--------
>>> from scipy.interpolate import Rbf
>>> x, y, z, d = np.random.rand(4, 50)
>>> rbfi = Rbf(x, y, z, d) # radial basis function interpolator instance
>>> xi = yi = zi = np.linspace(0, 1, 20)
>>> di = rbfi(xi, yi, zi) # interpolated values
>>> di.shape
(20,)
"""
def _euclidean_norm(self, x1, x2):
return np.sqrt(((x1 - x2)**2).sum(axis=0))
def _h_multiquadric(self, r):
return np.sqrt((1.0/self.epsilon*r)**2 + 1)
def _h_inverse_multiquadric(self, r):
return 1.0/np.sqrt((1.0/self.epsilon*r)**2 + 1)
def _h_gaussian(self, r):
return np.exp(-(1.0/self.epsilon*r)**2)
def _h_linear(self, r):
return r
def _h_cubic(self, r):
return r**3
def _h_quintic(self, r):
return r**5
def _h_thin_plate(self, r):
return xlogy(r**2, r)
# Setup self._function and do smoke test on initial r
def _init_function(self, r):
if isinstance(self.function, str):
self.function = self.function.lower()
_mapped = {'inverse': 'inverse_multiquadric',
'inverse multiquadric': 'inverse_multiquadric',
'thin-plate': 'thin_plate'}
if self.function in _mapped:
self.function = _mapped[self.function]
func_name = "_h_" + self.function
if hasattr(self, func_name):
self._function = getattr(self, func_name)
else:
functionlist = [x[3:] for x in dir(self) if x.startswith('_h_')]
raise ValueError("function must be a callable or one of " +
", ".join(functionlist))
self._function = getattr(self, "_h_"+self.function)
elif callable(self.function):
allow_one = False
if hasattr(self.function, 'func_code') or \
hasattr(self.function, '__code__'):
val = self.function
allow_one = True
elif hasattr(self.function, "im_func"):
val = get_method_function(self.function)
elif hasattr(self.function, "__call__"):
val = get_method_function(self.function.__call__)
else:
raise ValueError("Cannot determine number of arguments to function")
argcount = get_function_code(val).co_argcount
if allow_one and argcount == 1:
self._function = self.function
elif argcount == 2:
if sys.version_info[0] >= 3:
self._function = self.function.__get__(self, Rbf)
else:
import new
self._function = new.instancemethod(self.function, self,
Rbf)
else:
raise ValueError("Function argument must take 1 or 2 arguments.")
a0 = self._function(r)
if a0.shape != r.shape:
raise ValueError("Callable must take array and return array of the same shape")
return a0
def __init__(self, *args, **kwargs):
self.xi = np.asarray([np.asarray(a, dtype=np.float_).flatten()
for a in args[:-1]])
self.N = self.xi.shape[-1]
self.di = np.asarray(args[-1]).flatten()
if not all([x.size == self.di.size for x in self.xi]):
raise ValueError("All arrays must be equal length.")
self.norm = kwargs.pop('norm', self._euclidean_norm)
r = self._call_norm(self.xi, self.xi)
self.epsilon = kwargs.pop('epsilon', None)
if self.epsilon is None:
            # default epsilon is the "average distance between nodes" based
# on a bounding hypercube
dim = self.xi.shape[0]
ximax = np.amax(self.xi, axis=1)
ximin = np.amin(self.xi, axis=1)
edges = ximax-ximin
edges = edges[np.nonzero(edges)]
self.epsilon = np.power(np.prod(edges)/self.N, 1.0/edges.size)
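            # Worked example (illustrative, not from the original code): for
            # N = 50 nodes scattered over a unit square the bounding-box edges
            # are roughly [1, 1], so epsilon ~ (1 * 1 / 50) ** (1 / 2) ~ 0.14,
            # i.e. about the typical spacing between neighbouring nodes.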
self.smooth = kwargs.pop('smooth', 0.0)
self.function = kwargs.pop('function', 'multiquadric')
# attach anything left in kwargs to self
# for use by any user-callable function or
# to save on the object returned.
for item, value in kwargs.items():
setattr(self, item, value)
self.A = self._init_function(r) - np.eye(self.N)*self.smooth
self.nodes = linalg.solve(self.A, self.di)
def _call_norm(self, x1, x2):
if len(x1.shape) == 1:
x1 = x1[np.newaxis, :]
if len(x2.shape) == 1:
x2 = x2[np.newaxis, :]
x1 = x1[..., :, np.newaxis]
x2 = x2[..., np.newaxis, :]
return self.norm(x1, x2)
def __call__(self, *args):
args = [np.asarray(x) for x in args]
if not all([x.shape == y.shape for x in args for y in args]):
raise ValueError("Array lengths must be equal")
shp = args[0].shape
xa = np.asarray([a.flatten() for a in args], dtype=np.float_)
r = self._call_norm(xa, self.xi)
return np.dot(self._function(r), self.nodes).reshape(shp)
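# Minimal usage sketch (illustrative, not part of the original module): smooth a
# noisy 1-d signal with a thin-plate spline and, alternatively, a custom
# two-argument kernel as described in the class docstring above.
#
#     import numpy as np
#     x = np.linspace(0, 10, 9)
#     d = np.sin(x) + 0.1 * np.random.randn(9)
#     smooth_rbf = Rbf(x, d, function='thin_plate', smooth=0.1)
#     custom_rbf = Rbf(x, d, function=lambda self, r: np.exp(-(r / self.epsilon) ** 2))
#     xi = np.linspace(0, 10, 101)
#     di = smooth_rbf(xi)   # smoothed values evaluated at the new points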
|
jlcarmic/producthunt_simulator
|
venv/lib/python2.7/site-packages/scipy/interpolate/rbf.py
|
Python
|
mit
| 9,536
|
[
"Gaussian"
] |
24efda41ea8f4e4ea4ab3dc20647d766fee82dc2429588daf951663f69cf992c
|
#!/usr/bin/env python
from __future__ import division, print_function
import os
from math import log
from collections import namedtuple, OrderedDict, deque
import time
import threading
import json
import gc
try:
from Queue import Queue
except ImportError:
from queue import Queue
import io
import logging
from fractions import Fraction
import numpy
from PIL import Image
import pyexiv2
from picamera import PiCamera
from scipy.signal import convolve, gaussian, savgol_coeffs
from exposure import lens
logger = logging.getLogger("camera")
import signal
try:
import colors as _colors
except ImportError:
logger.warning("Colors module not available, using slow Python implementation")
colors = None
else:
colors = _colors.Flatfield("flatfield.txt")
sRGB = _colors.SRGB()
ExpoRedBlue = namedtuple("ExpoRedBlue", ("ev", "red", "blue"))
GainRedBlue = namedtuple("GainRedBlue", ("red", "blue"))
class SavGol(object):
    "Class for Savitzky-Golay filtering"
def __init__(self, order=2):
"select the order of the filter"
self.order = order
self.cache = {} #len, filter
    def __call__(self, lst):
        "Filter a list; the most recent values carry the most weight."
l = len(lst)
if l%2 == 0:
lst = numpy.array(lst[1:])
l -= 1
else:
lst = numpy.array(lst)
if len(lst) < self.order:
return lst[-1]
if l not in self.cache:
self.cache[l] = savgol_coeffs(l, self.order, pos=0)
return numpy.dot(lst, self.cache[l])
savgol0 = SavGol(0)
savgol1 = SavGol(1)
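# Illustrative use of the two module-level filters (not in the original file):
# feed them a short history, oldest value first, and get back one smoothed scalar.
#
#     history = [2.0, 2.1, 2.4, 2.3, 2.6]
#     smoothed = savgol1(history)   # Savitzky-Golay estimate weighted toward recent values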
class Frame(object):
"""This class holds one image"""
BINNING = 1
INDEX = 0
semclass = threading.Semaphore()
# YUV conversion matrix from ITU-R BT.601 version (SDTV)
# Y U V
YUV2RGB = numpy.array([[1.164, 0.000, 1.596], # R
[1.164, -0.392, -0.813], # G
[1.164, 2.017, 0.000]]).T # B
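    # Worked example (illustrative): the BT.601 studio-swing red pixel
    # (Y, U, V) = (81, 90, 240) becomes (65, -38, 112) after the offsets applied
    # in the rgb property below, then
    #   R = 1.164*65 + 1.596*112               ~ 254
    #   G = 1.164*65 - 0.392*(-38) - 0.813*112 ~ 0
    #   B = 1.164*65 + 2.017*(-38)             ~ 0
    # i.e. it maps back to (almost) pure red in 8-bit RGB.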
def __init__(self, data):
"Constructor"
self.timestamp = time.time()
with self.semclass:
self.index = self.INDEX
self.__class__.INDEX += 1
self.data = data
self.camera_meta = {}
self.gravity = None
self.position = None
self.servo_status = None
self.sem = threading.Semaphore()
self._yuv = None
self._rgb = None
self._histograms = None
def __repr__(self):
return "Frame #%04i"%self.index
def get_date_time(self):
return time.strftime("%Y-%m-%d-%Hh%Mm%Ss", time.localtime(self.timestamp))
@property
def yuv(self):
"""Retrieve the YUV array, binned 2x2 or not depending on the BINNING class attribute"""
if self._yuv is None:
with self.sem:
if self._yuv is None:
resolution = self.camera_meta.get("resolution", (640, 480))
if colors:
yuv = _colors.yuv420_to_yuv(self.data, resolution)[0]
else:
width, height = resolution
fwidth = (width + 31) & ~(31)
fheight = (height + 15) & ~ (15)
ylen = fwidth * fheight
uvlen = ylen // 4
ary = numpy.frombuffer(self.data, dtype=numpy.uint8)
if self.BINNING == 2:
Y_full = (ary[:ylen]).astype(numpy.int16)
Y_full.shape = (fheight, fwidth)
Y = (Y_full[::2, ::2] + Y_full[::2, 1::2] + Y_full[1::2, ::2] + Y_full[1::2, 1::2]) // 4
U = ary[ylen: - uvlen].reshape((fheight // 2, fwidth // 2))
V = ary[-uvlen:].reshape((fheight // 2, fwidth // 2))
yuv = numpy.dstack((Y.astype(numpy.uint8), U, V))[:height // 2, :width // 2, :]
else:
# Reshape the values into two dimensions, and double the size of the
# U and V values (which only have quarter resolution in YUV4:2:0)
Y = (ary[:ylen]).reshape((fheight, fwidth))
U = (ary[ylen: - uvlen]).reshape((fheight // 2, fwidth // 2)).repeat(2, axis=0).repeat(2, axis=1)
V = (ary[-uvlen:]).reshape((fheight // 2, fwidth // 2)).repeat(2, axis=0).repeat(2, axis=1)
# Stack the channels together and crop to the actual resolution
yuv = numpy.dstack((Y, U, V))[:height, :width, :]
self._yuv = yuv
return self._yuv
@property
    def rgb(self):
        """Retrieve the image as an RGB array. Takes 13s"""
if self._rgb is None:
if colors is None:
YUV = self.yuv.astype(numpy.int16)
with self.sem:
if self._rgb is None:
if colors:
resolution = self.camera_meta.get("resolution", (640, 480))
self._rgb, self._histograms = colors.yuv420_to_rgb16(self.data, resolution)
else:
YUV[:, :, 0] = YUV[:, :, 0] - 16 # Offset Y by 16
YUV[:, :, 1:] = YUV[:, :, 1:] - 128 # Offset UV by 128
# Calculate the dot product with the matrix to produce RGB output,
# clamp the results to byte range and convert to bytes
self._rgb = (YUV.dot(self.YUV2RGB)*257.0).clip(0, 65535).astype(numpy.uint16)
return self._rgb
@property
def histograms(self):
"""Calculate the 4 histograms with Y,R,G,B"""
if self._histograms is None:
if colors is None:
histograms = numpy.zeros((4, 256), numpy.int32)
histograms[0] = numpy.bincount(self.yuv[:, :, 0].ravel(), minlength=256)
histograms[1] = numpy.bincount(self.rgb[:, :, 0].ravel(), minlength=256)
histograms[2] = numpy.bincount(self.rgb[:, :, 1].ravel(), minlength=256)
histograms[3] = numpy.bincount(self.rgb[:, :, 2].ravel(), minlength=256)
self._histograms = histograms
else:
rgb = self.rgb
return self._histograms
@classmethod
def load(cls, fname):
"""load the raw data on one side and the header on the other"""
with open(fname) as f:
new = cls(f.read())
jf = fname[:-3] + "json"
if os.path.exists(jf):
with open(jf) as f:
new.camera_meta = json.load(f)
if "index" in new.camera_meta:
new.index = new.camera_meta["index"]
return new
def save(self):
"Save the data as YUV raw data"
fname = self.get_date_time()+".yuv"
with open(fname, "w") as f:
f.write(self.data)
fname = self.get_date_time()+".json"
comments = OrderedDict((("index", self.index),))
if self.position:
comments["pan"] = self.position.pan
comments["tilt"] = self.position.tilt
if self.gravity:
comments["gx"] = self.gravity.x
comments["gy"] = self.gravity.y
comments["gz"] = self.gravity.z
comments.update(self.camera_meta)
with open(fname, "w") as f:
f.write(json.dumps(comments, indent=4))
logger.info("Saved YUV raw data %i %s", self.index, fname)
class StreamingOutput(object):
    """This class handles the stream; it recycles a BytesIO and provides frames"""
def __init__(self, size):
"""Constructor
:param size: size of an image in bytes.
        For YUV, it is 1.5x the number of pixels of the padded image.
"""
self.size = size
self.frame = None
self.buffer = io.BytesIO()
self.condition = threading.Condition()
def write(self, buf):
res = self.buffer.write(buf)
if self.buffer.tell() >= self.size:
#image complete
self.buffer.truncate(self.size)
# New frame, copy the existing buffer's content and notify all
# clients it's available
with self.condition:
self.frame = Frame(self.buffer.getvalue())
self.condition.notify_all()
self.buffer.seek(0)
else:
print("Incomplete buffer of %i bytes"%self.buffer.tell())
return res
class Camera(threading.Thread):
    "A class for continuously acquiring images..."
def __init__(self, resolution=(3280, 2464), framerate=1, sensor_mode=3,
avg_ev=21, avg_wb=31, histo_ev=None, wb_red=None, wb_blue=None,
quit_event=None, queue=None, config_queue=None):
"""This thread handles the camera
"""
threading.Thread.__init__(self, name="Camera")
signal.signal(signal.SIGINT, self.quit)
self.quit_event = quit_event or threading.Event()
self._can_record = threading.Event()
self._done_recording = threading.Event()
self._done_recording.set()
self._can_record.set()
self.queue = queue or Queue()
self.config_queue = config_queue or Queue()
self.avg_ev = avg_ev
self.avg_wb = avg_wb
self.histo_ev = histo_ev or []
self.wb_red = wb_red or []
self.wb_blue = wb_blue or []
raw_size = (((resolution[0]+31)& ~(31))*((resolution[1]+15)& ~(15))*3//2)
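        # e.g. for the default 3280x2464 sensor (illustrative arithmetic): the width
        # is padded to 3296, the height is already a multiple of 16, so
        # raw_size = 3296 * 2464 * 3 // 2 = 12,182,016 bytes per YUV420 frame.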
self.stream = StreamingOutput(raw_size)
self.camera = PiCamera(resolution=resolution, framerate=framerate, sensor_mode=sensor_mode)
def __del__(self):
self.camera = self.stream = None
def quit(self, *arg, **kwarg):
"quit the main loop and end the thread"
self.quit_event.set()
def pause(self, wait=True):
"pause the recording, wait for the current value to be acquired"
self._can_record.clear()
if wait:
self._done_recording.wait()
def resume(self):
"resume the recording"
self._can_record.set()
def get_config(self):
config = OrderedDict([("resolution", tuple(self.camera.resolution)),
("framerate", float(self.camera.framerate)),
("sensor_mode", self.camera.sensor_mode),
("avg_ev", self.avg_ev),
("avg_wb", self.avg_wb),
("hist_ev", self.histo_ev),
("wb_red", self.wb_red),
("wb_blue", self.wb_blue)])
return config
def set_config(self, dico):
self.camera.resolution = dico.get("resolution", self.camera.resolution)
self.camera.framerate = dico.get("framerate", self.camera.framerate)
self.camera.sensor_mode = dico.get("sensor_mode", self.camera.sensor_mode)
self.wb_red = dico.get("wb_red", self.wb_red)
self.wb_blue = dico.get("wb_blue", self.wb_blue)
self.histo_ev = dico.get("histo_ev", self.histo_ev)
self.avg_ev = dico.get("avg_ev", self.avg_ev)
self.avg_wb = dico.get("avg_wb", self.avg_wb)
def set_analysis(self, do_analysis):
if do_analysis:
self.camera.awb_mode = "off" # "auto"
self.camera.exposure_mode = "off" #night" #"auto"
else:
self.camera.awb_mode = "auto"
self.camera.exposure_mode = "auto"
def get_metadata(self):
metadata = {"iso": float(self.camera.iso),
"analog_gain": float(self.camera.analog_gain),
"awb_gains": [float(i) for i in self.camera.awb_gains],
"digital_gain": float(self.camera.digital_gain),
"exposure_compensation": float(self.camera.exposure_compensation),
"exposure_speed": float(self.camera.exposure_speed),
"exposure_mode": self.camera.exposure_mode,
"framerate": float(self.camera.framerate),
"revision": self.camera.revision,
"shutter_speed": float(self.camera.shutter_speed),
"aperture": lens.aperture,
"resolution": self.camera.resolution}
if metadata['revision'] == "imx219":
metadata['iso_calc'] = 54.347826086956516 * metadata["analog_gain"] * metadata["digital_gain"]
else:
metadata['iso_calc'] = 100.0 * metadata["analog_gain"] * metadata["digital_gain"]
return metadata
def warm_up(self, delay=10):
"warm up the camera"
logger.info("warming up the camera for %ss",delay)
framerate = self.camera.framerate
self.camera.awb_mode = "auto"
self.camera.exposure_mode = "auto"
self.camera.framerate = 10
for i in range(delay):
rg, bg = self.camera.awb_gains
rg = float(rg)
bg = float(bg)
if rg == 0.0:
rg = 1.0
if bg == 0.0:
bg = 1.0
self.wb_red.append(rg)
self.wb_blue.append(bg)
time.sleep(1)
self.camera.framerate = framerate
def run(self):
"main thread activity"
#self.camera.awb_mode = "off" # "auto"
#self.camera.exposure_mode = "off" #night" #"auto"
self._done_recording.clear()
for foo in self.camera.capture_continuous(self.stream, format='yuv'):
self._done_recording.set()
if self.stream.frame is not None:
frame = self.stream.frame
logger.debug("Acquired %s", frame)
frame.camera_meta = self.get_metadata()
self.queue.put(frame)
else:
logger.info("No frame acquired")
if self.quit_event.is_set():
break
# update the camera settings if needed:
# Disabled for now at trajlaps level
if not self.config_queue.empty():
while not self.config_queue.empty():
evrb = self.config_queue.get()
if evrb.red:
self.wb_red.append(evrb.red)
self.wb_blue.append(evrb.blue)
if evrb.ev:
self.histo_ev.append(evrb.ev)
self.config_queue.task_done()
self.update_expo()
self._can_record.wait()
self._done_recording.clear()
self.camera.close()
def update_expo(self):
"""This method updates the white balance, exposure time and gain
according to the history
"""
#return #disabled for now
if len(self.wb_red) * len(self.wb_blue) == 0:
return
if len(self.wb_red) > self.avg_wb:
self.wb_red = self.wb_red[-self.avg_wb:]
self.wb_blue = self.wb_blue[-self.avg_wb:]
if len(self.histo_ev) > self.avg_ev:
self.histo_ev = self.histo_ev[-self.avg_ev:]
self.camera.awb_gains = (savgol0(self.wb_red),
savgol0(self.wb_blue))
ev = savgol1(self.histo_ev)
speed = lens.calc_speed(ev)
#if self.camera.revision == "imx219":
# speed *= 1.84
framerate = float(self.camera.framerate)
logger.info("Update speed: %s %s",speed,framerate)
if speed > framerate:
self.camera.shutter_speed = int(1000000. / framerate / speed)
self.camera.iso = 100
elif speed > framerate * 2:
self.camera.shutter_speed = int(2000000. / framerate / speed)
self.camera.iso = 200
elif speed > framerate * 4:
self.camera.shutter_speed = int(4000000. / framerate / speed)
self.camera.iso = 400
else:
self.camera.shutter_speed = min(int(8000000. / framerate / speed), int(1000000/framerate))
self.camera.iso = 800
# #TODO: how to change framerate ? maybe start with low
class Saver(threading.Thread):
"This thread is in charge of saving the frames arriving from the queue on the disk"
def __init__(self, folder="/mnt", queue=None, quit_event=None):
threading.Thread.__init__(self, name="Saver")
self.queue = queue or Queue()
        self.quit_event = quit_event or threading.Event()
self.folder = os.path.abspath(folder)
if not os.path.exists(self.folder):
logger.warning("Creating folder %s", self.folder)
os.makedirs(self.folder)
def run(self):
while not self.quit_event.is_set():
t0 = time.time()
frames = self.queue.get()
if frames:
frame = frames.pop()
if not frame:
continue
comments = OrderedDict((("index", frame.index),
("summed", 1)))
exposure_speed = frame.camera_meta.get("exposure_speed", 1)
RGB16 = frame.rgb
                if exposure_speed > 62000.0: # 1/16 second
#2e5/frame.camera_meta.get("framerate"):
while frames:
other = frames.pop()
#merge in linear RGB space
summed, over = sRGB.sum(RGB16, other.rgb)
if over:
break
else:
RGB16 = summed
comments["summed"] += 1
exposure_speed += other.camera_meta.get("exposure_speed", 1)
frames = None
gc.collect()
name = os.path.join(self.folder, frame.get_date_time()+".jpg")
logger.info("Save frame #%i as %s sum of %i", frame.index, name, comments["summed"])
rgb8 = sRGB.compress(RGB16)
Image.fromarray(rgb8).save(name, quality=90, optimize=True, progressive=True)
exif = pyexiv2.ImageMetadata(name)
exif.read()
speed = Fraction(int(exposure_speed), 1000000)
iso = int(frame.camera_meta.get("iso_calc"))
exif["Exif.Photo.FNumber"] = Fraction(int(frame.camera_meta.get("aperture") * 100), 100)
exif["Exif.Photo.ExposureTime"] = speed
exif["Exif.Photo.ISOSpeedRatings"] = iso
if frame.position:
comments["pan"] = frame.position.pan
comments["tilt"] = frame.position.tilt
if frame.gravity:
comments["gx"] = frame.gravity.x
comments["gy"] = frame.gravity.y
comments["gz"] = frame.gravity.z
comments.update(frame.camera_meta)
if frame.servo_status:
comments.update(frame.servo_status)
exif.comment = json.dumps(comments)
exif.write(preserve_timestamps=True)
self.queue.task_done()
logger.info("Saving of frame #%i took %.3fs, sum of %s", frame.index, time.time() - t0, comments["summed"])
class Analyzer(threading.Thread):
"This thread is in charge of analyzing the image and suggesting new exposure value and white balance"
def __init__(self, frame_queue=None, config_queue=None, quit_event=None):
threading.Thread.__init__(self, name="Analyzer")
self.queue = frame_queue or Queue()
self.output_queue = config_queue or Queue()
        self.quit_event = quit_event or threading.Event()
#self.history = []
#self.max_size = 100
#i = numpy.arange(40)
#j = 0.5 ** (0.25 * i)
#k = ((j + 0.099) / 1.099) ** (1 / 0.45) * (235 - 16) + 16
#m2 = j < 0.018
#k[m2] = (235-16) / 4.5 * j[m2] + 16
#kr = numpy.round(k).astype(int)
#self.ukr = numpy.concatenate(([0], numpy.sort(numpy.unique(kr)), [256]))
#start = -0.25*(self.ukr.size-1)+0.5
#self.delta_expo = numpy.arange(start, 0.5, 0.25)
#self.g19_2 = gaussian(19, 2)
#self.g19_2 /= self.g19_2.sum()
def run(self):
"""This executed in a thread"""
        target_rgb = 5e-4 # the pixel at the 99.95th percentile should be white
while not self.quit_event.is_set():
frame = self.queue.get()
t0 = time.time()
ev = oldev = lens.calc_EV(1000000/frame.camera_meta.get("exposure_speed", 1), iso=frame.camera_meta.get("iso_calc",100))
histo = frame.histograms
if 1: #for exposure calculation:
ylin = histo[0]
ymax = numpy.where(ylin)[0][-1]
#logger.info("ymax: %s", ymax)
if ymax>1000:
                    logger.debug("exposure %s is correct to over %s", ev, ymax)
cs = ylin.cumsum()
lim = 16
lo_light = cs[lim-1]
hi_light = cs[-1] - cs[-lim]
if hi_light > lo_light: #over exposed
if lo_light == 0:
ev += 1
else:
                            ev += log(1.0 * hi_light / lo_light, 2)
                        logger.info("image is over-exposed, let's shrink %s %s EV: %s->%s", lo_light, hi_light, oldev, ev)
else:
ev += log(1.0 * ymax / ylin.size, 2)
                        logger.info("image is under-exposed, let's boost it %s EV: %s->%s", ymax, oldev, ev)
if 1: #Calculation of the corrected white-balance
csr = numpy.cumsum(histo[1])
csg = numpy.cumsum(histo[2])
csb = numpy.cumsum(histo[3])
if (csr[-1] != csg[-1]) or (csg[-1] != csb[-1]):
                    logger.error("Different number of pixels in channels R, G and B: %s", histo.sum(axis=-1))
pos = csr[-1] * (1.0 - target_rgb)
try:
pos_r = numpy.where(csr >= pos)[0][0]
pos_g = numpy.where(csg >= pos)[0][0]
pos_b = numpy.where(csb >= pos)[0][0]
except IndexError as e:
                    logger.error("no awb %s, exposure too low", e)
#self.queue.task_done()
#continue
pos_r = numpy.where(histo[1])[0][-1]
pos_g = numpy.where(histo[2])[0][-1]
pos_b = numpy.where(histo[3])[0][-1]
rg, bg = frame.camera_meta.get("awb_gains", (1.0, 1.0))
if rg == 0.0:
rg = 1.0
if bg == 0.0:
bg = 1.0
try:
red_gain = 1.0 * rg * pos_g / pos_r
blue_gain = 1.0 * bg * pos_g / pos_b
logger.info("Update Red: %s -> %s Blue %s -> %s r%s g%s b%s", rg, red_gain, bg, blue_gain, pos_r, pos_g, pos_b)
#awb = GainRedBlue(min(8, max(0.125, red_gain)), min(8, max(0.125, blue_gain)))
except ZeroDivisionError:
logger.error("pos_r %s, pos_g %s, pos_b %s, rg %s, bg %s", pos_r, pos_g, pos_b, rg, bg)
red_gain = rg
blue_gain = bg
#awb = GainRedBlue(rg, bg)
else:
red_gain = None
blue_gain = None
now = time.time()
awb = ExpoRedBlue(ev, min(8, max(0.125, red_gain)), min(8, max(0.125, blue_gain)))
self.output_queue.put(awb)
self.queue.task_done()
logger.info("Analysis of frame #%i took: %.3fs, delay since acquisition: %.3fs", frame.index, now-t0, now-frame.timestamp)
|
kif/yocto-meteo-plugin
|
camera.py
|
Python
|
mit
| 24,206
|
[
"Gaussian"
] |
d6a5e34acb95aa011eab968615b9530f2171baaa7d9d68dd68e1fc6ba9479184
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import pootle.core.mixins.treeitem
class Migration(migrations.Migration):
dependencies = [
("pootle_app", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="Language",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"code",
models.CharField(
help_text='ISO 639 language code for the language, possibly followed by an underscore (_) and an ISO 3166 country code. <a href="http://www.w3.org/International/articles/language-tags/">More information</a>',
unique=True,
max_length=50,
verbose_name="Code",
db_index=True,
),
),
(
"fullname",
models.CharField(max_length=255, verbose_name="Full Name"),
),
(
"specialchars",
models.CharField(
help_text="Enter any special characters that users might find difficult to type",
max_length=255,
verbose_name="Special Characters",
blank=True,
),
),
(
"nplurals",
models.SmallIntegerField(
default=0,
help_text='For more information, visit <a href="http://docs.translatehouse.org/projects/localization-guide/en/latest/l10n/pluralforms.html">our page</a> on plural forms.',
verbose_name="Number of Plurals",
choices=[
(0, "Unknown"),
(1, 1),
(2, 2),
(3, 3),
(4, 4),
(5, 5),
(6, 6),
],
),
),
(
"pluralequation",
models.CharField(
help_text='For more information, visit <a href="http://docs.translatehouse.org/projects/localization-guide/en/latest/l10n/pluralforms.html">our page</a> on plural forms.',
max_length=255,
verbose_name="Plural Equation",
blank=True,
),
),
(
"directory",
models.OneToOneField(
editable=False,
to="pootle_app.Directory",
on_delete=models.CASCADE,
),
),
],
options={"ordering": ["code"], "db_table": "pootle_app_language",},
bases=(models.Model, pootle.core.mixins.treeitem.TreeItem),
),
]
|
evernote/zing
|
pootle/apps/pootle_language/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 3,360
|
[
"VisIt"
] |
d6f7927d3c7506c9b49962e8885bf27cbc9e76103de05e019a0d847bf08b2706
|
# coding utf8
import setuptools
from setuptools import setup
from GetOrganelleLib.versions import get_versions
import platform
import sys
import os
# system info
SYSTEM_NAME = ""
if platform.system() == "Linux":
SYSTEM_NAME = "linux"
elif platform.system() == "Darwin":
SYSTEM_NAME = "macOS"
else:
sys.stdout.write("Error: currently GetOrganelle is not supported for " + platform.system() + "! ")
exit()
# python version
MAJOR_VERSION, MINOR_VERSION = sys.version_info[:2]
if MAJOR_VERSION == 2 and MINOR_VERSION >= 7:
pass
elif MAJOR_VERSION == 3 and MINOR_VERSION >= 5:
pass
else:
    sys.stdout.write("Python version has to be 2.7+ or 3.5+")
sys.exit(0)
sys.stdout.write("Python " + str(sys.version).replace("\n", " ") + "\n")
sys.stdout.write("PLATFORM: " + " ".join(platform.uname()) + "\n")
sys.stdout.write("Using setuptools " + str(setuptools.__version__) + "\n")
# python libs
install_dependencies = []
try:
import numpy
except ImportError:
if MAJOR_VERSION == 3:
install_dependencies.append("numpy>=1.16.4")
else:
install_dependencies.append("numpy==1.16.4")
else:
sys.stdout.write("Existed module numpy " + str(numpy.__version__) + "\n")
try:
import scipy
except ImportError:
if MAJOR_VERSION == 3:
install_dependencies.append("scipy>=1.3.0")
else:
# higher version not compatible with python2
install_dependencies.append("scipy==1.2.1")
else:
    sys.stdout.write("Existed module scipy " + str(scipy.__version__) + "\n")
try:
import sympy
except ImportError:
if MAJOR_VERSION == 3:
install_dependencies.append("sympy>=1.4")
else:
install_dependencies.append("sympy==1.4")
else:
sys.stdout.write("Existed module sympy " + str(sympy.__version__) + "\n")
try:
import requests
except ImportError:
install_dependencies.append("requests[security]")
else:
sys.stdout.write("Existed module requests " + str(requests.__version__) + "\n")
PATH_OF_THIS_SCRIPT = os.path.split(os.path.realpath(__file__))[0]
LIB_NAME = "GetOrganelleLib"
# LIB_DIR = os.path.join(PATH_OF_THIS_SCRIPT, LIB_NAME)
DEP_NAME = "GetOrganelleDep"
DEP_DIR = os.path.join(PATH_OF_THIS_SCRIPT, DEP_NAME)
LBL_NAME = "LabelDatabase"
# LBL_DIR = os.path.join(PATH_OF_THIS_SCRIPT, LIB_NAME, LBL_NAME)
SEQ_NAME = "SeedDatabase"
# SEQ_DIR = os.path.join(PATH_OF_THIS_SCRIPT, LIB_NAME, SEQ_NAME)
if "--continue" in sys.argv:
RESUME = True
sys.argv.remove("--continue")
else:
RESUME = False
if "--in-situ" in sys.argv:
in_situ = True
sys.argv.remove("--in-situ")
else:
in_situ = False
if "--keep-temp" in sys.argv:
keep_temp = True
sys.argv.remove("--keep-temp")
else:
keep_temp = False
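# Illustrative invocations of the custom flags parsed above (hypothetical):
#   python setup.py install               # regular setuptools install
#   python setup.py install --keep-temp   # keep build/dist artifacts afterwards
#   python setup.py install --in-situ     # skip setup(), only chmod the scripts in place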
def get_recursive_files(target_dir, start_from="", exclude_files=None):
if exclude_files is None:
exclude_files = set()
    assert target_dir.startswith(start_from), "target_dir should start with start_from!"
omit_len = len(start_from.rstrip("/") + "/") if start_from else 0
for f_dir, sub_dirs, files in os.walk(target_dir):
for i_file in files:
if not i_file.startswith(".") and os.path.join(f_dir, i_file)[omit_len:] not in exclude_files:
yield os.path.join(f_dir, i_file)[omit_len:]
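# Example (illustrative): walking the platform-specific dependency folder while
# reporting paths relative to GetOrganelleDep yields entries such as
# "linux/SPAdes/bin/spades.py", which is how PACKAGE_DATA is filled below.
#
#     for rel_path in get_recursive_files(os.path.join(DEP_DIR, SYSTEM_NAME), start_from=DEP_DIR):
#         print(rel_path)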
EXCLUDE_SHARE_SPADES_PATHS = set()
scripts_to_install = ["get_organelle_from_reads.py",
"get_organelle_from_assembly.py",
"Utilities/check_annotations.py",
"Utilities/cook_coding_for_blast.py",
"Utilities/disentangle_organelle_assembly.py",
"Utilities/evaluate_assembly_using_mapping.py",
"Utilities/fastg_to_gfa.py",
"Utilities/get_organelle_config.py",
"Utilities/get_pair_reads.py",
"Utilities/gfa_to_fastg.py",
"Utilities/gfa_to_fasta.py",
"Utilities/join_spades_fastg_by_blast.py",
"Utilities/make_batch_for_iteratively_mapping_assembling.py",
"Utilities/make_batch_for_get_organelle.py",
"Utilities/plastome_arch_info.py",
"Utilities/rm_low_coverage_duplicated_contigs.py",
"Utilities/round_statistics.py",
"Utilities/slim_graph.py",
"Utilities/summary_get_organelle_output.py",
"Utilities/reconstruct_graph_from_fasta.py"]
# rename execution program if not python
dep_scripts_to_change = []
if os.path.isdir(os.path.join(DEP_DIR, SYSTEM_NAME, "SPAdes", "bin")):
for spades_script in os.listdir(os.path.join(DEP_DIR, SYSTEM_NAME, "SPAdes", "bin")):
if spades_script.endswith(".py") and not spades_script.startswith("."):
dep_scripts_to_change.append(os.path.join(DEP_DIR, SYSTEM_NAME, "SPAdes", "bin", spades_script))
if os.path.exists(os.path.join(DEP_DIR, SYSTEM_NAME, "bowtie2", "bowtie2-build")):
dep_scripts_to_change.append(os.path.join(DEP_DIR, SYSTEM_NAME, "bowtie2", "bowtie2-build"))
if os.path.basename(sys.executable) != "python":
for rename_py_script in scripts_to_install + dep_scripts_to_change:
original_lines = open(rename_py_script, encoding="utf-8").readlines()
original_lines[0] = "#!" + sys.executable + "\n"
open(rename_py_script, "w", encoding="utf-8").writelines(original_lines)
# check local BLAST
if os.path.exists(os.path.join(DEP_DIR, SYSTEM_NAME, "ncbi-blast")):
files_to_check = ["blastn", "makeblastdb"]
for check_f in files_to_check:
check_file_path = os.path.join(DEP_DIR, SYSTEM_NAME, "ncbi-blast", check_f)
if not os.path.exists(check_file_path):
            raise EnvironmentError(check_file_path + " does not exist!")
os.chmod(check_file_path, 0o755)
# check local Bowtie2
if os.path.exists(os.path.join(DEP_DIR, SYSTEM_NAME, "bowtie2")):
files_to_check = ["bowtie2", "bowtie2-align-l", "bowtie2-build", "bowtie2-build-l"]
for check_f in files_to_check:
check_file_path = os.path.join(DEP_DIR, SYSTEM_NAME, "bowtie2", check_f)
if not os.path.exists(check_file_path):
            raise EnvironmentError(check_file_path + " does not exist!")
os.chmod(check_file_path, 0o755)
# check local SPAdes
if os.path.exists(os.path.join(DEP_DIR, SYSTEM_NAME, "SPAdes")):
files_to_check = ["metaspades.py", "spades-bwa", "spades-gbuilder", "spades-ionhammer", "spades.py",
"plasmidspades.py", "spades-core", "spades-gmapper", "spades-kmercount", "spades_init.py",
"rnaspades.py", "spades-corrector-core", "spades-hammer", "spades-truseq-scfcorrection",
"truspades.py"]
for check_f in files_to_check:
check_file_path = os.path.join(DEP_DIR, SYSTEM_NAME, "SPAdes", "bin", check_f)
if not os.path.exists(check_file_path):
            raise EnvironmentError(check_file_path + " does not exist!")
os.chmod(check_file_path, 0o755)
files_to_check = ["configs", "spades_pipeline"]
for check_f in files_to_check:
check_file_path = os.path.join(DEP_DIR, SYSTEM_NAME, "SPAdes", "share", "spades", check_f)
if not os.path.exists(check_file_path):
            raise EnvironmentError(check_file_path + " does not exist!")
PACKAGES = [LIB_NAME]
PACKAGE_DATA = {}
# PACKAGE_DATA = {LIB_NAME: [os.path.join(LBL_NAME, "VERSION"),
# os.path.join(SEQ_NAME, "VERSION")]}
if os.path.isdir(DEP_DIR) and os.path.isfile(os.path.join(DEP_DIR, "__init__.py")):
PACKAGES.append(DEP_NAME)
PACKAGE_DATA[DEP_NAME] = [this_file
for this_file in
get_recursive_files(target_dir=os.path.join(DEP_DIR, SYSTEM_NAME),
start_from=DEP_DIR, exclude_files=EXCLUDE_SHARE_SPADES_PATHS)]
if not in_situ:
setup(
name="GetOrganelle",
version=get_versions(),
description="a fast and versatile toolkit for accurate de novo assembly of organelle genomes.",
author="Jian-Jun Jin",
author_email="jj3111@columbia.edu",
url="http://github.com/Kinggerm/GetOrganelle",
license="GNU General Public License, version 3",
packages=PACKAGES,
platforms="linux/MacOS",
scripts=scripts_to_install,
# relative path to each package
package_data=PACKAGE_DATA,
install_requires=install_dependencies,
zip_safe=False
)
if keep_temp:
for temp_dir_or_files in ("build", "dist", "*.pyc", "*.tgz", "*.egg-info"):
os.system("rm -vrf " + str(os.path.join(PATH_OF_THIS_SCRIPT, temp_dir_or_files)))
else:
for temp_dir_or_files in ("build", "dist", "*.pyc", "*.tgz", "*.egg-info",
os.path.join(LIB_NAME, LBL_NAME, "*.n*"), os.path.join(LIB_NAME, SEQ_NAME, "*.bt2l")):
os.system("rm -vrf " + str(os.path.join(PATH_OF_THIS_SCRIPT, temp_dir_or_files)))
else:
for script_chmod in scripts_to_install:
os.chmod(script_chmod, 0o755)
|
Kinggerm/GetOrganelle
|
setup.py
|
Python
|
gpl-3.0
| 9,179
|
[
"BLAST",
"BWA"
] |
90c6e3365dcf77e6947ac9c01be1d3e28fe1307271c196dda18f335e6e2b1afd
|
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_array
from .utils import (orthonormalize, random_orthonormal, weighted_mean,
solve_weighted, check_array_with_weights)
class EMPCA(BaseEstimator, TransformerMixin):
"""Expectation-Maximization PCA
This is an iterative implementation of weighted PCA based on an
Expectation-Maximization approach, following Bailey (2012) [1]_.
Parameters
----------
n_components : int (optional)
Number of components to keep. If not specified, all components are kept
max_iter : int (default=100)
Maximum number of Expectation-Maximization iterations
random_state : int or None
Seed for the random initialization of eigenvectors
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ : array, [n_components]
The amount of variance explained by each of the selected components.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
See Also
--------
- PCA
- WPCA
- sklearn.decomposition.PCA
References
----------
.. [1] Bailey, S. PASP 124:919 (2012)
http://arxiv.org/abs/1208.4122
"""
def __init__(self, n_components=None, max_iter=100, random_state=None):
self.n_components = n_components
self.max_iter = max_iter
self.random_state = random_state
def _Estep(self, data, weights, eigvec):
"""E-step: solve for coeff given eigvec"""
if weights is None:
return np.dot(data, eigvec.T)
else:
return np.array([solve_weighted(eigvec.T, data[i], weights[i])
for i in range(data.shape[0])])
def _Mstep(self, data, weights, eigvec, coeff):
"""M-step: solve for eigvec given coeff"""
w2 = 1 if weights is None else weights ** 2
for i in range(eigvec.shape[0]):
# remove contribution of previous eigenvectors from data
d = data - np.dot(coeff[:, :i], eigvec[:i])
c = coeff[:, i:i + 1]
eigvec[i] = np.dot(c.T, w2 * d) / np.dot(c.T, w2 * c)
# orthonormalize computed vectors: in theory not necessary,
# but numerically it's a good idea
# TODO: perhaps do this more efficiently?
eigvec[:i + 1] = orthonormalize(eigvec[:i + 1])
return eigvec
def fit_transform(self, X, y=None, weights=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
weights: array-like, shape (n_samples, n_features)
Non-negative weights encoding the reliability of each measurement.
Equivalent to the inverse of the Gaussian errorbar.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X, weights = check_array_with_weights(X, weights)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
self.mean_ = weighted_mean(X, weights, axis=0)
X_c = X - self.mean_
eigvec = random_orthonormal(n_components, X.shape[1],
random_state=self.random_state)
# TODO: add a convergence check
for k in range(self.max_iter):
coeff = self._Estep(X_c, weights, eigvec)
eigvec = self._Mstep(X_c, weights, eigvec, coeff)
coeff = self._Estep(X_c, weights, eigvec)
self.components_ = eigvec
self.explained_variance_ = (coeff ** 2).sum(0) / X.shape[0]
if weights is None:
total_var = X_c.var(0).sum()
else:
XW = X_c * weights
total_var = np.sum((XW ** 2).sum(0) / (weights ** 2).sum(0))
self.explained_variance_ratio_ = (self.explained_variance_ / total_var)
return coeff
def fit(self, X, y=None, weights=None):
"""Compute principal components for X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
weights: array-like, shape (n_samples, n_features)
Non-negative weights encoding the reliability of each measurement.
Equivalent to the inverse of the Gaussian errorbar.
Returns
-------
self : object
Returns the instance itself.
"""
self.fit_transform(X, weights=weights)
return self
def transform(self, X, weights=None):
"""Apply dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
weights: array-like, shape (n_samples, n_features)
Non-negative weights encoding the reliability of each measurement.
Equivalent to the inverse of the Gaussian errorbar.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X, weights = check_array_with_weights(X, weights)
X_c = X - self.mean_
if weights is not None:
assert X.shape == weights.shape
X_c[weights == 0] = 0
return self._Estep(X_c, weights, self.components_)
def inverse_transform(self, X):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
Data in transformed representation.
Returns
-------
X_original : array-like, shape (n_samples, n_features)
"""
X = check_array(X)
return self.mean_ + np.dot(X, self.components_)
def reconstruct(self, X, weights=None):
"""Reconstruct the data using the PCA model
This is equivalent to calling transform followed by inverse_transform.
Parameters
----------
X : array-like, shape (n_samples, n_components)
Data in transformed representation.
weights: array-like, shape (n_samples, n_features)
Non-negative weights encoding the reliability of each measurement.
Equivalent to the inverse of the Gaussian errorbar.
Returns
-------
X_reconstructed : ndarray, shape (n_samples, n_components)
Reconstructed version of X
"""
return self.inverse_transform(self.transform(X, weights=weights))
def fit_reconstruct(self, X, weights=None):
"""Fit the model and reconstruct the data using the PCA model
This is equivalent to calling fit_transform()
followed by inverse_transform().
Parameters
----------
X : array-like, shape (n_samples, n_components)
Data in transformed representation.
weights: array-like, shape (n_samples, n_features)
Non-negative weights encoding the reliability of each measurement.
Equivalent to the inverse of the Gaussian errorbar.
Returns
-------
X_reconstructed : ndarray, shape (n_samples, n_components)
Reconstructed version of X
"""
return self.inverse_transform(self.fit_transform(X, weights=weights))
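# Minimal usage sketch (illustrative; assumes the package is importable as `wpca`
# and that numpy is installed):
#
#     import numpy as np
#     from wpca import EMPCA
#     rng = np.random.RandomState(0)
#     X = rng.randn(100, 5)
#     W = rng.rand(100, 5)                     # per-measurement weights (inverse errorbars)
#     pca = EMPCA(n_components=2, max_iter=50, random_state=0)
#     coeff = pca.fit_transform(X, weights=W)  # shape (100, 2)
#     X_hat = pca.reconstruct(X, weights=W)    # weighted low-rank reconstruction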
|
jakevdp/wpca
|
wpca/empca.py
|
Python
|
bsd-3-clause
| 8,136
|
[
"Gaussian"
] |
d7cc91e3624ea656acb44693ed85c26e8e4459104fe9cfb60ff0bfce07a3090b
|
# Copyright (c) 2007 The Regents of The University of Michigan
# Copyright (c) 2010 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import m5
from m5 import internal
from m5.internal.stats import schedStatEvent as schedEvent
from m5.objects import Root
from m5.util import attrdict, fatal
outputList = []
def initText(filename, desc=True):
output = internal.stats.initText(filename, desc)
outputList.append(output)
def initSimStats():
internal.stats.initSimStats()
names = []
stats_dict = {}
stats_list = []
raw_stats_list = []
def enable():
'''Enable the statistics package. Before the statistics package is
enabled, all statistics must be created and initialized and once
the package is enabled, no more statistics can be created.'''
__dynamic_cast = []
for k, v in internal.stats.__dict__.iteritems():
if k.startswith('dynamic_'):
__dynamic_cast.append(v)
for stat in internal.stats.statsList():
for cast in __dynamic_cast:
val = cast(stat)
if val is not None:
stats_list.append(val)
raw_stats_list.append(val)
break
else:
fatal("unknown stat type %s", stat)
for stat in stats_list:
if not stat.check() or not stat.baseCheck():
fatal("stat check failed for '%s' %d\n", stat.name, stat.id)
if not (stat.flags & flags.display):
stat.name = "__Stat%06d" % stat.id
def less(stat1, stat2):
v1 = stat1.name.split('.')
v2 = stat2.name.split('.')
return v1 < v2
stats_list.sort(less)
for stat in stats_list:
stats_dict[stat.name] = stat
stat.enable()
internal.stats.enable();
def prepare():
'''Prepare all stats for data access. This must be done before
dumping and serialization.'''
for stat in stats_list:
stat.prepare()
lastDump = 0
def dump():
'''Dump all statistics data to the registered outputs'''
curTick = m5.curTick()
global lastDump
assert lastDump <= curTick
if lastDump == curTick:
return
lastDump = curTick
internal.stats.processDumpQueue()
prepare()
for output in outputList:
if output.valid():
output.begin()
for stat in stats_list:
output.visit(stat)
output.end()
def reset():
'''Reset all statistics to the base state'''
# call reset stats on all SimObjects
root = Root.getInstance()
if root:
for obj in root.descendants(): obj.resetStats()
# call any other registered stats reset callbacks
for stat in stats_list:
stat.reset()
internal.stats.processResetQueue()
flags = attrdict({
'none' : 0x0000,
'init' : 0x0001,
'display' : 0x0002,
'total' : 0x0010,
'pdf' : 0x0020,
'cdf' : 0x0040,
'dist' : 0x0080,
'nozero' : 0x0100,
'nonan' : 0x0200,
})
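# A hedged sketch of how a simulation script might drive this module; the call
# order is an assumption, but the function names are the ones defined above.
#
#     from m5 import stats
#     stats.initText("stats.txt")    # register a text output backend
#     stats.initSimStats()
#     stats.enable()                 # freeze stat creation and build stats_list
#     # ... run the simulation ...
#     stats.dump()                   # visit every stat with each registered output
#     stats.reset()                  # zero stats before the next region of interest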
|
samueldotj/TeeRISC-Simulator
|
src/python/m5/stats/__init__.py
|
Python
|
bsd-3-clause
| 4,468
|
[
"VisIt"
] |
7ab1a8f20ef7efe2da0b4aec1cc6dfad1f9ed951aef33795c2db641f4e7f7d68
|
"""Copy database grids to netcdf.
Example: python db_to_netcdf.py <year> <month> <day> <utchour> <minute>
If the hour and minute are omitted, this is a daily copy, otherwise hourly.
see: akrherz/iem#199
"""
import sys
import datetime
import numpy as np
from pyiem.util import utc, ncopen, logger
from pyiem import iemre
LOG = logger()
def main(argv):
"""Go Main Go."""
if len(argv) == 6:
valid = utc(int(argv[1]), int(argv[2]), int(argv[3]), int(argv[4]))
ncfn = iemre.get_hourly_ncname(valid.year)
idx = iemre.hourly_offset(valid)
else:
valid = datetime.date(int(argv[1]), int(argv[2]), int(argv[3]))
ncfn = iemre.get_daily_ncname(valid.year)
idx = iemre.daily_offset(valid)
ds = iemre.get_grids(valid)
with ncopen(ncfn, "a", timeout=600) as nc:
for vname in ds:
if vname not in nc.variables:
continue
LOG.debug("copying database var %s to netcdf", vname)
# Careful here, ds could contain NaN values
nc.variables[vname][idx, :, :] = np.ma.array(
ds[vname].values, mask=np.isnan(ds[vname].values)
)
if __name__ == "__main__":
main(sys.argv)
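# Hedged usage sketch; the argument meanings are inferred from main() above,
# where a fifth argument selects the hourly branch (the last value is assumed
# to be the minute and is not used by the code shown here):
#
#     python db_to_netcdf.py 2021 3 15          # daily copy for 2021-03-15
#     python db_to_netcdf.py 2021 3 15 12 0     # hourly copy for 12 UTC on 2021-03-15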
|
akrherz/iem
|
scripts/iemre/db_to_netcdf.py
|
Python
|
mit
| 1,213
|
[
"NetCDF"
] |
2f8f25f4df23ea9474aadb02c080d1b764685bb7dd3e00bd2ff0ac8e67c9a6a2
|
########################################################################
# File: FTSAgent.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/05/31 10:00:13
########################################################################
""" :mod: FTSAgent
==============
.. module: FTSAgent
:synopsis: agent propagating scheduled RMS request in FTS
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
DIRAC agent propagating scheduled RMS request in FTS
Request processing phases (each in a separate thread):
1. MONITOR
...active FTSJobs, prepare FTSFiles dictionary with files to submit, fail, register and reschedule
2. CHECK REPLICAS
...just in case if all transfers are done, if yes, end processing
3. FAILED FILES:
...if at least one Failed FTSFile is found, set Request.Operation.File to 'Failed', end processing
4. UPDATE Waiting#SourceSE FTSFiles
...if any found in FTSDB
5. REGISTER REPLICA
...insert RegisterReplica operation to request, if some FTSFiles failed to register, end processing
6. RESCHEDULE FILES
...for FTSFiles failed with missing sources error
7. SUBMIT
...but read 'Waiting' FTSFiles first from FTSDB and merge those with FTSFiles to retry
"""
__RCSID__ = "$Id: $"
# #
# @file FTSAgent.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/05/31 10:00:51
# @brief Definition of FTSAgent class.
# # imports
import time
import datetime
import re
import math
# # from DIRAC
from DIRAC import S_OK, S_ERROR, gLogger
# # from CS
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getUsernameForDN
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
# # from Core
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.Core.Utilities.LockRing import LockRing
from DIRAC.Core.Utilities.ThreadPool import ThreadPool
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.Time import fromString
from DIRAC.Core.Utilities.List import breakListIntoChunks
# # from DMS
from DIRAC.DataManagementSystem.Client.FTSClient import FTSClient
from DIRAC.DataManagementSystem.Client.FTSJob import FTSJob
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.DataManagementSystem.private.FTSPlacement import FTSPlacement
from DIRAC.DataManagementSystem.private.FTSHistoryView import FTSHistoryView
from DIRAC.DataManagementSystem.Client.FTSFile import FTSFile
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
# # from RMS
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
# # from RSS
# #from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
# # from Resources
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
# # from Accounting
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
# # agent base name
AGENT_NAME = "DataManagement/FTSAgent"
class EscapeTryException(Exception):
pass
########################################################################
class FTSAgent(AgentModule):
"""
.. class:: FTSAgent
Agent propagating Scheduled request to Done or Failed state in the FTS system.
Requests and associated FTSJobs (and so FTSFiles) are kept in cache.
"""
# request cache
__reqCache = dict()
# # fts placement refresh in seconds
FTSPLACEMENT_REFRESH = FTSHistoryView.INTERVAL / 2
# # placeholder for max job per channel
MAX_ACTIVE_JOBS = 50
# # min threads
MIN_THREADS = 1
# # max threads
MAX_THREADS = 10
# # files per job
MAX_FILES_PER_JOB = 100
# # MAX FTS transfer per FTSFile
MAX_ATTEMPT = 256
# # stage flag
PIN_TIME = 0
# # FTS submission command
SUBMIT_COMMAND = 'glite-transfer-submit'
# # FTS monitoring command
MONITOR_COMMAND = 'glite-transfer-status'
# Max number of requests fetched from the RMS
MAX_REQUESTS = 100
# Minimum interval (seconds) between 2 job monitoring
MONITORING_INTERVAL = 600
# Flag to know if one selects Job requests (True) or not (False) or doesn't care (None)
PROCESS_JOB_REQUESTS = None
# # placeholder for FTS client
__ftsClient = None
# # placeholder for the FTS version
__ftsVersion = None
# # placeholder for request client
__requestClient = None
# # placeholder for resources helper
__resources = None
# # placeholder for RSS client
__rssClient = None
# # placeholder for FTSPlacement
__ftsPlacement = None
# # placement regeneration time delta
__ftsPlacementValidStamp = None
# # placeholder for threadPool
__threadPool = None
# # update lock
__updateLock = None
# # request cache
__reqCache = dict()
registrationProtocols = None
def updateLock(self):
""" update lock """
if not self.__updateLock:
self.__updateLock = LockRing().getLock("FTSAgentLock")
return self.__updateLock
@classmethod
def requestClient(cls):
""" request client getter """
if not cls.__requestClient:
cls.__requestClient = ReqClient()
return cls.__requestClient
@classmethod
def ftsClient(cls):
""" FTS client """
if not cls.__ftsClient:
cls.__ftsClient = FTSClient()
return cls.__ftsClient
@classmethod
def rssClient(cls):
""" RSS client getter """
if not cls.__rssClient:
cls.__rssClient = ResourceStatus()
return cls.__rssClient
@classmethod
def getRequest(cls, reqID):
""" get Requests systematically and refresh cache """
# Make sure the request is Scheduled
res = cls.requestClient().getRequestStatus(reqID)
if not res['OK']:
cls.__reqCache.pop(reqID, None)
return res
status = res['Value']
if status != 'Scheduled':
cls.__reqCache.pop(reqID, None)
return S_ERROR("Request with id %s is not Scheduled:%s" % (reqID, status))
getRequest = cls.requestClient().getRequest(reqID)
if not getRequest["OK"]:
cls.__reqCache.pop(reqID, None)
return getRequest
getRequest = getRequest["Value"]
if not getRequest:
cls.__reqCache.pop(reqID, None)
return S_ERROR("request of id '%s' not found in ReqDB" % reqID)
cls.__reqCache[reqID] = getRequest
return S_OK(cls.__reqCache[reqID])
@classmethod
def putRequest(cls, request, clearCache=True):
""" put request back to ReqDB
:param ~DIRAC.RequestManagementSystem.Client.Request.Request request: Request instance
:param bool clearCache: clear the cache?
also finalize request if status == Done
"""
# # put back request
if request.RequestID not in cls.__reqCache:
return S_OK()
put = cls.requestClient().putRequest(request)
if not put["OK"]:
return put
# # finalize first if possible
if request.Status == "Done" and request.JobID:
finalizeRequest = cls.requestClient().finalizeRequest(request.RequestID, request.JobID)
if not finalizeRequest["OK"]:
request.Status = "Scheduled"
# # del request from cache if needed
if clearCache:
cls.__reqCache.pop(request.RequestID, None)
return S_OK()
@classmethod
def putFTSJobs(cls, ftsJobsList):
""" put back fts jobs to the FTSDB """
for ftsJob in ftsJobsList:
put = cls.ftsClient().putFTSJob(ftsJob)
if not put["OK"]:
return put
return S_OK()
@staticmethod
def updateFTSFileDict(ftsFilesDict, toUpdateDict):
""" update `ftsFilesDict` with FTSFiles in `toUpdateDict` """
for category, ftsFileList in ftsFilesDict.iteritems():
for ftsFile in toUpdateDict.get(category, []):
if ftsFile not in ftsFileList:
ftsFileList.append(ftsFile)
return ftsFilesDict
def threadPool(self):
""" thread pool getter """
if not self.__threadPool:
self.__threadPool = ThreadPool(self.MIN_THREADS, self.MAX_THREADS)
self.__threadPool.daemonize()
return self.__threadPool
def resetFTSPlacement(self):
""" create fts Placement """
ftsHistory = self.ftsClient().getFTSHistory()
if not ftsHistory["OK"]:
self.log.error("unable to get FTS history:", ftsHistory["Message"])
return ftsHistory
ftsHistory = ftsHistory["Value"]
try:
self.updateLock().acquire()
if not self.__ftsPlacement:
self.__ftsPlacement = FTSPlacement(csPath=None, ftsHistoryViews=ftsHistory)
else:
self.__ftsPlacement.refresh(ftsHistoryViews=ftsHistory)
finally:
self.updateLock().release()
# # save time stamp
self.__ftsPlacementValidStamp = datetime.datetime.now(
) + datetime.timedelta(seconds=self.FTSPLACEMENT_REFRESH)
return S_OK()
def __init__(self, agentName, loadName, baseAgentName=False, properties=None):
if properties is None:
properties = {}
super(FTSAgent, self).__init__(agentName, loadName, baseAgentName=baseAgentName, properties=properties)
self.__factorOnMaxRequest = 3.
def initialize(self):
""" agent's initialization """
# # data manager
self.dataManager = DataManager()
log = self.log.getSubLogger("initialize")
self.FTSPLACEMENT_REFRESH = self.am_getOption(
"FTSPlacementValidityPeriod", self.FTSPLACEMENT_REFRESH)
log.info("FTSPlacement validity period = %s s" %
self.FTSPLACEMENT_REFRESH)
self.SUBMIT_COMMAND = self.am_getOption(
"SubmitCommand", self.SUBMIT_COMMAND)
log.info("FTS submit command = %s" % self.SUBMIT_COMMAND)
self.MONITOR_COMMAND = self.am_getOption(
"MonitorCommand", self.MONITOR_COMMAND)
log.info("FTS commands: submit = %s monitor %s" %
(self.SUBMIT_COMMAND, self.MONITOR_COMMAND))
self.PIN_TIME = self.am_getOption("PinTime", self.PIN_TIME)
log.info("Stage files before submission = ", {
True: "yes", False: "no"}[bool(self.PIN_TIME)])
self.MAX_ACTIVE_JOBS = self.am_getOption(
"MaxActiveJobsPerRoute", self.MAX_ACTIVE_JOBS)
log.info("Max active FTSJobs/route = ", str(self.MAX_ACTIVE_JOBS))
self.MAX_FILES_PER_JOB = self.am_getOption(
"MaxFilesPerJob", self.MAX_FILES_PER_JOB)
log.info("Max FTSFiles/FTSJob = ", str(self.MAX_FILES_PER_JOB))
self.MAX_ATTEMPT = self.am_getOption(
"MaxTransferAttempts", self.MAX_ATTEMPT)
log.info("Max transfer attempts = ", str(self.MAX_ATTEMPT))
# # thread pool
self.MIN_THREADS = self.am_getOption("MinThreads", self.MIN_THREADS)
self.MAX_THREADS = self.am_getOption("MaxThreads", self.MAX_THREADS)
minmax = (abs(self.MIN_THREADS), abs(self.MAX_THREADS))
self.MIN_THREADS, self.MAX_THREADS = min(minmax), max(minmax)
log.info("ThreadPool min threads = ", str(self.MIN_THREADS))
log.info("ThreadPool max threads = ", str(self.MAX_THREADS))
self.MAX_REQUESTS = self.am_getOption("MaxRequests", self.MAX_REQUESTS)
log.info("Max Requests fetched = ", str(self.MAX_REQUESTS))
self.MONITORING_INTERVAL = self.am_getOption(
"MonitoringInterval", self.MONITORING_INTERVAL)
log.info("Minimum monitoring interval = ",
str(self.MONITORING_INTERVAL))
self.PROCESS_JOB_REQUESTS = self.am_getOption(
"ProcessJobRequests", self.PROCESS_JOB_REQUESTS)
# We get a string as the default value is None... better than an eval()!
self.PROCESS_JOB_REQUESTS = {'True': True, 'False': False}.get(
self.PROCESS_JOB_REQUESTS, self.PROCESS_JOB_REQUESTS)
if self.PROCESS_JOB_REQUESTS is not None:
log.info("Process job requests = ",
str(self.PROCESS_JOB_REQUESTS))
self.__ftsVersion = Operations().getValue('DataManagement/FTSVersion', 'FTS2')
log.info("FTSVersion : %s" % self.__ftsVersion)
log.info("initialize: creation of FTSPlacement...")
createPlacement = self.resetFTSPlacement()
if not createPlacement["OK"]:
log.error("initialize:", createPlacement["Message"])
return createPlacement
# This sets the Default Proxy to used as that defined under
# /Operations/Shifter/DataManager
# the shifterProxy option in the Configuration can be used to change this default.
self.am_setOption('shifterProxy', 'DataManager')
log.info("will use DataManager proxy")
self.registrationProtocols = DMSHelpers().getRegistrationProtocols()
# # gMonitor stuff here
gMonitor.registerActivity("RequestsAtt", "Attempted requests executions",
"FTSAgent", "Requests/min", gMonitor.OP_SUM)
gMonitor.registerActivity("RequestsOK", "Successful requests executions",
"FTSAgent", "Requests/min", gMonitor.OP_SUM)
gMonitor.registerActivity("RequestsFail", "Failed requests executions",
"FTSAgent", "Requests/min", gMonitor.OP_SUM)
gMonitor.registerActivity("FTSJobsSubAtt", "FTSJobs creation attempts",
"FTSAgent", "Created FTSJobs/min", gMonitor.OP_SUM)
gMonitor.registerActivity("FTSJobsSubOK", "FTSJobs submitted successfully",
"FTSAgent", "Successful FTSJobs submissions/min", gMonitor.OP_SUM)
gMonitor.registerActivity("FTSJobsSubFail", "FTSJobs submissions failed",
"FTSAgent", "Failed FTSJobs submissions/min", gMonitor.OP_SUM)
gMonitor.registerActivity("FTSJobsMonAtt", "FTSJobs monitored",
"FTSAgent", "FTSJobs/min", gMonitor.OP_SUM)
gMonitor.registerActivity("FTSJobsMonOK", "FTSJobs monitored successfully",
"FTSAgent", "FTSJobs/min", gMonitor.OP_SUM)
gMonitor.registerActivity("FTSJobsMonFail", "FTSJobs attempts failed",
"FTSAgent", "FTSJobs/min", gMonitor.OP_SUM)
gMonitor.registerActivity("FTSMonitorFail", "Failed FTS monitor executions",
"FTSAgent", "Execution/mins", gMonitor.OP_SUM)
pollingTime = self.am_getOption("PollingTime", 60)
for status in list(FTSJob.INITSTATES + FTSJob.TRANSSTATES + FTSJob.FAILEDSTATES + FTSJob.FINALSTATES):
gMonitor.registerActivity("FTSJobs%s" % status, "FTSJobs %s" % status,
"FTSAgent", "FTSJobs/cycle", gMonitor.OP_ACUM, pollingTime)
gMonitor.registerActivity("FtSJobsPerRequest", "Average FTSJobs per request",
"FTSAgent", "FTSJobs/Request", gMonitor.OP_MEAN)
gMonitor.registerActivity("FTSFilesPerJob", "FTSFiles per FTSJob",
"FTSAgent", "Number of FTSFiles per FTSJob", gMonitor.OP_MEAN)
gMonitor.registerActivity("FTSSizePerJob", "Average FTSFiles size per FTSJob",
"FTSAgent", "Average submitted size per FTSJob", gMonitor.OP_MEAN)
return S_OK()
def finalize(self):
""" finalize processing """
# log = self.log.getSubLogger( "finalize" )
# if self.__reqCache:
# log.info( 'putting back %d requests from cache' % len( self.__reqCache ) )
# else:
# log.info( 'no requests to put back' )
# for request in self.__reqCache.values():
# put = self.requestClient().putRequest( request )
# if not put["OK"]:
# log.error( "unable to put back request '%s': %s" % ( request.RequestName, put["Message"] ) )
return S_OK()
def execute(self):
""" one cycle execution """
# Don't use the server certificate otherwise the DFC wont let us write
gConfigurationData.setOptionInCFG('/DIRAC/Security/UseServerCertificate', 'false')
log = gLogger.getSubLogger("execute")
# # reset FTSPlacement if expired
now = datetime.datetime.now()
if now > self.__ftsPlacementValidStamp:
log.info("resetting expired FTS placement...")
resetFTSPlacement = self.resetFTSPlacement()
if not resetFTSPlacement["OK"]:
log.error("FTSPlacement recreation error:", resetFTSPlacement["Message"])
return resetFTSPlacement
self.__ftsPlacementValidStamp = now + \
datetime.timedelta(seconds=self.FTSPLACEMENT_REFRESH)
# To be sure we have enough requests, ask for several times as much
requestIDs = self.requestClient().getRequestIDsList(
statusList=["Scheduled"], limit=int(
self.__factorOnMaxRequest * self.MAX_REQUESTS), getJobID=True)
if not requestIDs["OK"]:
log.error("unable to read scheduled request ids", requestIDs["Message"])
return requestIDs
if not requestIDs["Value"]:
requestIDs = []
elif self.PROCESS_JOB_REQUESTS is None:
requestIDs = [req[0] for req in requestIDs["Value"] if req[0] not in self.__reqCache]
else:
# If we want to process requests only with JobID or only without jobID, make a selection
requestIDs = [req[0] for req in requestIDs["Value"]
if req[0] not in self.__reqCache and
len(req) >= 4 and bool(req[3]) == self.PROCESS_JOB_REQUESTS]
# Correct the factor by the observed ratio between needed and obtained, but limit it between 1 and 10
gotRequests = len(requestIDs) + 1
neededRequests = self.MAX_REQUESTS - len(self.__reqCache)
self.__factorOnMaxRequest = max(1,
min(10,
math.ceil(self.__factorOnMaxRequest * neededRequests / float(gotRequests))))
# We took more but keep only the maximum number
requestIDs = requestIDs[:neededRequests] + self.__reqCache.keys()
if not requestIDs:
log.info("no 'Scheduled' requests to process")
return S_OK()
log.info("found %s requests to process:" % len(requestIDs))
log.info(" => from internal cache: %s" % (len(self.__reqCache)))
log.info(" => new read from RMS: %s" % (len(requestIDs) - len(self.__reqCache)))
for requestID in requestIDs:
request = self.getRequest(requestID)
if not request["OK"]:
log.error("Error getting request", "%s: %s" % (requestID, request["Message"]))
continue
request = request["Value"]
sTJId = request.RequestID
fullLogged = 0
while True:
queue = self.threadPool().generateJobAndQueueIt(self.processRequest,
args=(request, ),
sTJId=sTJId)
if queue["OK"]:
log.info("Request enqueued for execution%s" %
((' (after waiting %d seconds)' % fullLogged) if fullLogged else ''),
sTJId)
gMonitor.addMark("RequestsAtt", 1)
break
if not fullLogged:
log.info("Queue is full, wait 1 second to enqueue")
fullLogged += 1
time.sleep(1)
# # process all results
self.threadPool().processAllResults()
return S_OK()
def processRequest(self, request):
""" process one request
:param ~DIRAC.RequestManagementSystem.Client.Request.Request request: ReqDB.Request
"""
log = self.log.getSubLogger("req_%s/%s" % (request.RequestID, request.RequestName))
operation = request.getWaiting()
if not operation["OK"]:
log.error("Unable to find 'Scheduled' ReplicateAndRegister operation in request")
return self.putRequest(request)
operation = operation["Value"]
if not isinstance(operation, Operation):
log.error("Waiting returned operation is not an operation:", type(operation))
return self.putRequest(request)
if operation.Type != "ReplicateAndRegister":
log.error("operation to be executed is not a ReplicateAndRegister but", operation.Type)
return self.putRequest(request)
if operation.Status != "Scheduled":
log.error("operation in a wrong state, expecting 'Scheduled', got", operation.Status)
return self.putRequest(request)
log.info('start processRequest')
# # select FTSJobs, by default all in TRANS_STATES and INIT_STATES
ftsJobs = self.ftsClient().getFTSJobsForRequest(request.RequestID)
if not ftsJobs["OK"]:
log.error(ftsJobs["Message"])
return ftsJobs
ftsJobs = [ftsJob for ftsJob in ftsJobs.get("Value", []) if ftsJob.Status not in FTSJob.FINALSTATES]
# # Use a try: finally: for making sure FTS jobs are put back before returning
try:
# # dict keeping info about files to reschedule, submit, fail and register
ftsFilesDict = dict((k, list()) for k in ("toRegister", "toSubmit", "toFail", "toReschedule", "toUpdate"))
now = datetime.datetime.utcnow()
jobsToMonitor = [job for job in ftsJobs if
(now - job.LastUpdate).seconds >
(self.MONITORING_INTERVAL *
(3. if StorageElement(job.SourceSE).status()['TapeSE'] else 1.))
]
if jobsToMonitor:
log.info("==> found %s FTSJobs to monitor" % len(jobsToMonitor))
# # PHASE 0 = monitor active FTSJobs
for ftsJob in jobsToMonitor:
monitor = self.__monitorJob(request, ftsJob)
if not monitor["OK"]:
log.error("unable to monitor FTSJob", "%s: %s" % (ftsJob.FTSJobID, monitor["Message"]))
ftsJob.Status = "Submitted"
else:
ftsFilesDict = self.updateFTSFileDict(ftsFilesDict, monitor["Value"])
log.info("monitoring of FTSJobs completed")
for key, ftsFiles in ftsFilesDict.iteritems():
if ftsFiles:
log.info(" => %d FTSFiles to %s" % (len(ftsFiles), key[2:].lower()))
if len(ftsJobs) != len(jobsToMonitor):
log.info("==> found %d FTSJobs that were monitored recently" % (len(ftsJobs) - len(jobsToMonitor)))
if not jobsToMonitor:
# Nothing to happen this time, escape
raise EscapeTryException
# # PHASE ONE - check ready replicas
missingReplicas = self.__checkReadyReplicas(request, operation)
if not missingReplicas["OK"]:
log.error(missingReplicas["Message"])
else:
missingReplicas = missingReplicas["Value"]
for opFile in operation:
# Actually the condition below should never happen... Change printout for checking
if opFile.LFN not in missingReplicas and opFile.Status not in ('Done', 'Failed'):
log.warn("File should be set Done! %s is replicated at all targets" % opFile.LFN)
opFile.Status = "Done"
if missingReplicas:
# Check if these files are in the FTSDB
ftsFiles = self.ftsClient().getAllFTSFilesForRequest(request.RequestID)
if not ftsFiles['OK']:
log.error(ftsFiles['Message'])
else:
ftsFiles = ftsFiles['Value']
ftsLfns = set(ftsFile.LFN for ftsFile in ftsFiles)
# Recover files not in FTSDB
toSchedule = set(missingReplicas) - ftsLfns
if toSchedule:
log.warn('%d files in operation are not in FTSDB, reset them Waiting' % len(toSchedule))
for opFile in operation:
if opFile.LFN in toSchedule and opFile.Status == 'Scheduled':
opFile.Status = 'Waiting'
# Recover files with target not in FTSDB
toSchedule = set(missing for missing, missingSEs in missingReplicas.iteritems()
if not [ftsFile for ftsFile in ftsFiles
if ftsFile.LFN == missing and ftsFile.TargetSE in missingSEs])
if toSchedule:
log.warn('%d targets in operation are not in FTSDB, reset files Waiting' % len(toSchedule))
for opFile in operation:
if opFile.LFN in toSchedule and opFile.Status == 'Scheduled':
opFile.Status = 'Waiting'
# identify missing LFNs that are waiting for a replication which is finished
for ftsFile in [f for f in ftsFiles if f.LFN in missingReplicas and f.Status.startswith('Waiting#')]:
targetSE = ftsFile.Status.split('#')[1]
finishedFiles = [f for f in ftsFiles if
f.LFN == ftsFile.LFN and
f.Status == 'Finished' and
f.TargetSE == targetSE and
f not in ftsFilesDict['toUpdate']]
if finishedFiles:
log.warn(
"%s is %s while replication was Finished to %s, update" %
(ftsFile.LFN, ftsFile.Status, targetSE))
ftsFilesDict['toUpdate'] += finishedFiles
# identify Active transfers for which there is no FTS job any longer and reschedule them
for ftsFile in [f for f in ftsFiles if f.Status ==
'Active' and f.TargetSE in missingReplicas.get(f.LFN, [])]:
if not [ftsJob for ftsJob in ftsJobs if ftsJob.FTSGUID == ftsFile.FTSGUID]:
ftsFilesDict['toReschedule'].append(ftsFile)
# identify Finished transfer for which the replica is still missing
for ftsFile in [f for f in ftsFiles if f.Status == 'Finished' and f.TargetSE in missingReplicas.get(
f.LFN, []) and f not in ftsFilesDict['toRegister']]:
# Check if there is a registration operation for that file and that target
regOp = [op for op in request if
op.Type == 'RegisterReplica' and
op.TargetSE == ftsFile.TargetSE and
[f for f in op if f.LFN == ftsFile.LFN]]
if not regOp:
ftsFilesDict['toReschedule'].append(ftsFile)
# Recover files that are Failed but were not spotted
for ftsFile in [f for f in ftsFiles if f.Status ==
'Failed' and f.TargetSE in missingReplicas.get(f.LFN, [])]:
reschedule, submit, fail = self.__checkFailed(ftsFile)
if fail and ftsFile not in ftsFilesDict['toFail']:
ftsFilesDict['toFail'].append(ftsFile)
elif reschedule and ftsFile not in ftsFilesDict['toReschedule']:
ftsFilesDict['toReschedule'].append(ftsFile)
elif submit and ftsFile not in ftsFilesDict['toSubmit']:
ftsFilesDict['toSubmit'].append(ftsFile)
# If all transfers are finished for unregistered files and there is
# already a registration operation, set it Done
ftsLFNs = [f.LFN for f in ftsFiles]
for lfn in missingReplicas:
# We make sure here that the file is being processed by FTS
if lfn in ftsLFNs:
if not [f for f in ftsFiles if
f.LFN == lfn and
(f.Status != 'Finished' or
f in ftsFilesDict['toReschedule'] or
f in ftsFilesDict['toRegister'])]:
for opFile in operation:
if opFile.LFN == lfn:
opFile.Status = 'Done'
break
else:
# Temporary log
log.warn("File with missing replica not in FTS files", lfn)
for key, ftsFiles in ftsFilesDict.iteritems():
if ftsFiles:
log.info(" => %d FTSFiles to %s" % (len(ftsFiles), key[2:].lower()))
toFail = ftsFilesDict.get("toFail", [])
toReschedule = ftsFilesDict.get("toReschedule", [])
toSubmit = ftsFilesDict.get("toSubmit", [])
toRegister = ftsFilesDict.get("toRegister", [])
toUpdate = ftsFilesDict.get("toUpdate", [])
# # PHASE TWO = Failed files? -> make request Failed and return
if toFail:
log.error("==> found %d 'Failed' FTSFiles, but maybe other files can be processed..." % len(toFail))
for opFile in operation:
for ftsFile in toFail:
if opFile.FileID == ftsFile.FileID:
opFile.Error = ftsFile.Error
opFile.Status = "Failed"
operation.Error = "%s files are missing any replicas" % len(toFail)
# # request.Status should be Failed if all files in the operation "Failed"
if request.Status == "Failed":
request.Error = "ReplicateAndRegister %s failed" % operation.Order
log.error("request is set to 'Failed'")
# # putRequest is done by the finally: clause... Not good to do it twice
raise EscapeTryException
# # PHASE THREE - update Waiting#TargetSE FTSFiles
if toUpdate:
log.info("==> found %s possible FTSFiles to update..." % (len(toUpdate)))
byTarget = {}
for ftsFile in toUpdate:
byTarget.setdefault(ftsFile.TargetSE, []).append(ftsFile.FileID)
for targetSE, fileIDList in byTarget.iteritems():
update = self.ftsClient().setFTSFilesWaiting(operation.OperationID, targetSE, fileIDList)
if not update["OK"]:
log.error("update FTSFiles failed:", update["Message"])
# # PHASE FOUR - add 'RegisterReplica' Operations
if toRegister:
log.info("==> found %d Files waiting for registration, adding 'RegisterReplica' operations" % len(toRegister))
registerFiles = self.__insertRegisterOperation(request, operation, toRegister)
if not registerFiles["OK"]:
log.error("unable to create 'RegisterReplica' operations:", registerFiles["Message"])
# if request.Status == "Waiting":
# log.info( "request is in 'Waiting' state, will put it back to RMS" )
# return self.putRequest( request )
# # PHASE FIVE - reschedule operation files
if toReschedule:
log.info("==> found %s Files to reschedule" % len(toReschedule))
rescheduleFiles = self.__reschedule(request, operation, toReschedule)
if not rescheduleFiles["OK"]:
log.error('Failed to reschedule files', rescheduleFiles["Message"])
# # PHASE SIX - read Waiting ftsFiles and submit new FTSJobs. We get also Failed files to recover them if needed
ftsFiles = self.ftsClient().getFTSFilesForRequest(
request.RequestID, ["Waiting", "Failed", 'Submitted', 'Canceled'])
if not ftsFiles["OK"]:
log.error(ftsFiles["Message"])
else:
retryIds = set(ftsFile.FTSFileID for ftsFile in toSubmit)
for ftsFile in ftsFiles["Value"]:
if ftsFile.FTSFileID not in retryIds:
if ftsFile.Status in ('Failed', 'Canceled'):
# If the file did not fail unrecoverably and is not yet set toSubmit
_reschedule, submit, _fail = self.__checkFailed(ftsFile)
elif ftsFile.Status == 'Submitted':
if ftsFile.FTSGUID not in [job.FTSGUID for job in ftsJobs]:
log.warn('FTS GUID %s not found in FTS jobs, resubmit file transfer' % ftsFile.FTSGUID)
ftsFile.Status = 'Waiting'
submit = True
else:
submit = False
else:
submit = True
if submit:
toSubmit.append(ftsFile)
retryIds.add(ftsFile.FTSFileID)
# # should not put back jobs that have not been monitored this time
ftsJobs = jobsToMonitor
# # submit new ftsJobs
if toSubmit:
if request.Status != 'Scheduled':
log.info("Found %d FTSFiles to submit while request is no longer in Scheduled status (%s)"
% (len(toSubmit), request.Status))
else:
self.__checkDuplicates(request.RequestID, toSubmit)
log.info("==> found %s FTSFiles to submit" % len(toSubmit))
submit = self.__submit(request, operation, toSubmit)
if not submit["OK"]:
log.error(submit["Message"])
else:
ftsJobs += submit["Value"]
# # status change? - put back request
if request.Status != "Scheduled":
log.info("request no longer in 'Scheduled' state (%s), will put it back to RMS" % request.Status)
except EscapeTryException:
# This clause is raised when one wants to return from within the try: clause
# only put back jobs that were monitored
ftsJobs = jobsToMonitor
except Exception as exceptMessage:
log.exception("Exception in processRequest", lException=exceptMessage)
finally:
putRequest = self.putRequest(request, clearCache=(request.Status != "Scheduled"))
if not putRequest["OK"]:
log.error("unable to put back request:", putRequest["Message"])
# # put back jobs in all cases
if ftsJobs:
for ftsJob in list(ftsJobs):
if not len(ftsJob):
log.warn('FTS job empty, removed: %s' % ftsJob.FTSGUID)
self.ftsClient().deleteFTSJob(ftsJob.FTSJobID)
ftsJobs.remove(ftsJob)
putJobs = self.putFTSJobs(ftsJobs)
if not putJobs["OK"]:
log.error("unable to put back FTSJobs:", putJobs["Message"])
putRequest = putJobs
# This is where one returns from after execution of the finally: block
return putRequest
def __checkDuplicates(self, reqID, toSubmit):
""" Check in a list of FTSFiles whether there are duplicates
"""
tupleList = []
log = self.log.getSubLogger("%s/checkDuplicates" % reqID)
for ftsFile in list(toSubmit):
fTuple = (ftsFile.LFN, ftsFile.SourceSE, ftsFile.TargetSE)
if fTuple in tupleList:
log.warn("Duplicate file to submit, removed:", ', '.join(fTuple))
toSubmit.remove(ftsFile)
self.ftsClient().deleteFTSFiles(ftsFile.OperationID, [ftsFile.FileID])
else:
tupleList.append(fTuple)
def __reschedule(self, request, operation, toReschedule):
""" reschedule list of :toReschedule: files in request for operation :operation:
:param Request request:
:param Operation operation:
:param list toReschedule: list of FTSFiles
"""
log = self.log.getSubLogger("req_%s/%s/reschedule" % (request.RequestID, request.RequestName))
ftsFileIDs = [ftsFile.FileID for ftsFile in toReschedule]
for opFile in operation:
if opFile.FileID in ftsFileIDs:
opFile.Status = "Waiting"
toSchedule = []
# # filter files
for opFile in [opf for opf in operation if opf.Status == "Waiting"]:
replicas = self.__filterReplicas(opFile)
if not replicas["OK"]:
continue
replicas = replicas["Value"]
validReplicas = replicas.get("Valid")
noMetaReplicas = replicas.get("NoMetadata")
noReplicas = replicas.get('NoReplicas')
badReplicas = replicas.get('Bad')
noActiveReplicas = replicas.get('NoActiveReplicas')
if validReplicas:
validTargets = list(set(operation.targetSEList) - set(validReplicas))
if not validTargets:
log.info("file %s is already present at all targets" % opFile.LFN)
opFile.Status = "Done"
else:
toSchedule.append((opFile.toJSON()["Value"], validReplicas, validTargets))
elif noMetaReplicas:
log.warn("unable to schedule '%s', couldn't get metadata at %s" %
(opFile.LFN, ','.join(noMetaReplicas)))
elif noReplicas:
log.warn("unable to schedule %s, file doesn't exist at %s" %
(opFile.LFN, ','.join(noReplicas)))
opFile.Status = 'Failed'
elif badReplicas:
log.warn("unable to schedule %s, all replicas have a bad checksum at %s" %
(opFile.LFN, ','.join(badReplicas)))
opFile.Status = 'Failed'
elif noActiveReplicas:
log.warn("unable to schedule '%s', couldn't find active replicas at %s" %
(opFile.LFN, ','.join(noActiveReplicas)))
request.delayNextExecution(60)
# # do real schedule here
if toSchedule:
log.info("Rescheduling %d files" % len(toReschedule))
ftsSchedule = self.ftsClient().ftsSchedule(request.RequestID,
operation.OperationID,
toSchedule)
if not ftsSchedule["OK"]:
log.error("Error scheduling files", ftsSchedule["Message"])
return ftsSchedule
ftsSchedule = ftsSchedule["Value"]
for opFile in operation:
fileID = opFile.FileID
if fileID in ftsSchedule["Successful"]:
opFile.Status = "Scheduled"
elif fileID in ftsSchedule["Failed"]:
opFile.Error = ftsSchedule["Failed"][fileID]
log.error("Error scheduling file %s" % opFile.LFN, opFile.Error)
return S_OK()
def __submit(self, request, operation, toSubmit):
""" create and submit new FTSJobs using list of FTSFiles
:param Request request: ReqDB.Request instance
:param list ftsFiles: list of FTSFile instances
:return: [ FTSJob, FTSJob, ...]
"""
log = self.log.getSubLogger("req_%s/%s/submit" % (request.RequestID, request.RequestName))
bySourceAndTarget = {}
for ftsFile in toSubmit:
if ftsFile.SourceSE not in bySourceAndTarget:
bySourceAndTarget.setdefault(ftsFile.SourceSE, {})
if ftsFile.TargetSE not in bySourceAndTarget[ftsFile.SourceSE]:
bySourceAndTarget[ftsFile.SourceSE].setdefault(ftsFile.TargetSE, [])
bySourceAndTarget[ftsFile.SourceSE][ftsFile.TargetSE].append(ftsFile)
ftsJobs = []
for source, targetDict in bySourceAndTarget.iteritems():
for target, ftsFileList in targetDict.iteritems():
log.info("found %s files to submit from %s to %s" % (len(ftsFileList), source, target))
route = self.__ftsPlacement.findRoute(source, target)
if not route["OK"]:
log.error(route["Message"])
continue
route = route["Value"]
routeValid = self.__ftsPlacement.isRouteValid(route)
if not routeValid['OK']:
log.error("Route invalid : %s" % routeValid['Message'])
continue
sourceSE = StorageElement(source)
sourceToken = sourceSE.getStorageParameters(protocol='srm')
if not sourceToken["OK"]:
log.error("unable to get sourceSE parameters:", "(%s) %s" % (source, sourceToken["Message"]))
continue
seStatus = sourceSE.status()
targetSE = StorageElement(target)
targetToken = targetSE.getStorageParameters(protocol='srm')
if not targetToken["OK"]:
log.error("unable to get targetSE parameters:", "(%s) %s" % (target, targetToken["Message"]))
continue
# # create FTSJob
for fileList in breakListIntoChunks(ftsFileList, self.MAX_FILES_PER_JOB):
ftsJob = FTSJob()
ftsJob.RequestID = request.RequestID
ftsJob.OperationID = operation.OperationID
ftsJob.SourceSE = source
ftsJob.TargetSE = target
ftsJob.SourceToken = sourceToken["Value"].get("SpaceToken", "")
ftsJob.TargetToken = targetToken["Value"].get("SpaceToken", "")
ftsJob.FTSServer = route.ftsServer
for ftsFile in fileList:
ftsFile.Attempt += 1
ftsFile.Error = ""
ftsJob.addFile(ftsFile)
submit = ftsJob.submitFTS(self.__ftsVersion, command=self.SUBMIT_COMMAND,
pinTime=self.PIN_TIME if seStatus['TapeSE'] else 0)
if not submit["OK"]:
log.error("unable to submit FTSJob:", submit["Message"])
continue
log.info("FTSJob '%s'@'%s' has been submitted" % (ftsJob.FTSGUID, ftsJob.FTSServer))
# # update statuses for job files
for ftsFile in ftsJob:
ftsFile.FTSGUID = ftsJob.FTSGUID
ftsFile.Status = "Submitted"
ftsFile.Attempt += 1
# # update placement route
try:
self.updateLock().acquire()
self.__ftsPlacement.startTransferOnRoute(route)
finally:
self.updateLock().release()
ftsJobs.append(ftsJob)
log.info("%s new FTSJobs have been submitted" % len(ftsJobs))
return S_OK(ftsJobs)
def __monitorJob(self, request, ftsJob):
""" execute FTSJob.monitorFTS for a given :ftsJob:
if ftsJob is in a final state, finalize it
:param Request request: ReqDB.Request instance
:param FTSJob ftsJob: FTSDB.FTSJob instance
"""
log = self.log.getSubLogger("req_%s/%s/monitor/%s" % (request.RequestID, request.RequestName, ftsJob.FTSGUID))
log.info("FTSJob '%s'@'%s'" % (ftsJob.FTSGUID, ftsJob.FTSServer))
# # this will be returned
ftsFilesDict = dict((k, list()) for k in ("toRegister", "toSubmit", "toFail", "toReschedule", "toUpdate"))
monitor = ftsJob.monitorFTS(
self.__ftsVersion, command=self.MONITOR_COMMAND)
if not monitor["OK"]:
gMonitor.addMark("FTSMonitorFail", 1)
log.error(monitor["Message"])
if "getTransferJobSummary2: Not authorised to query request" in monitor["Message"] or\
'was not found' in monitor['Message'] or\
"Not found" in monitor['Message'] or\
'Unknown transfer state' in monitor['Message']:
log.error("FTSJob not known (expired on server?): delete it")
for ftsFile in ftsJob:
ftsFile.Status = "Waiting"
ftsFilesDict["toSubmit"].append(ftsFile)
# # No way further for that job: delete it
res = self.ftsClient().deleteFTSJob(ftsJob.FTSJobID)
if not res['OK']:
log.error("Unable to delete FTSJob", res['Message'])
return S_OK(ftsFilesDict)
return monitor
monitor = monitor["Value"]
log.info("FTSJob Status = %s Completeness = %s%%" % (ftsJob.Status, ftsJob.Completeness))
# # monitor status change
gMonitor.addMark("FTSJobs%s" % ftsJob.Status, 1)
if ftsJob.Status in FTSJob.FINALSTATES:
finalizeFTSJob = self.__finalizeFTSJob(request, ftsJob)
if not finalizeFTSJob["OK"]:
if 'Unknown transfer state' in finalizeFTSJob['Message']:
for ftsFile in ftsJob:
ftsFile.Status = "Waiting"
ftsFilesDict["toSubmit"].append(ftsFile)
# # No way further for that job: delete it
res = self.ftsClient().deleteFTSJob(ftsJob.FTSJobID)
if not res['OK']:
log.error("Unable to delete FTSJob", res['Message'])
else:
log.error(finalizeFTSJob["Message"])
return finalizeFTSJob
else:
ftsFilesDict = self.updateFTSFileDict(ftsFilesDict, finalizeFTSJob["Value"])
return S_OK(ftsFilesDict)
def __finalizeFTSJob(self, request, ftsJob):
""" finalize FTSJob
:param ~DIRAC.RequestManagementSystem.Client.Request.Request request: ReqDB.Request instance
:param FTSJob ftsJob: FTSDB.FTSJob instance
"""
log = self.log.getSubLogger("req_%s/%s/monitor/%s/finalize" % (request.RequestID,
request.RequestName,
ftsJob.FTSJobID))
log.info("finalizing FTSJob %s@%s" % (ftsJob.FTSGUID, ftsJob.FTSServer))
# # this will be returned
ftsFilesDict = dict((k, list()) for k in ("toRegister", "toSubmit", "toFail", "toReschedule", "toUpdate"))
monitor = ftsJob.monitorFTS(
self.__ftsVersion, command=self.MONITOR_COMMAND, full=True)
if not monitor["OK"]:
log.error(monitor["Message"])
return monitor
# # split FTSFiles to different categories
processFiles = self.__filterFiles(ftsJob)
if not processFiles["OK"]:
log.error(processFiles["Message"])
return processFiles
processFiles = processFiles['Value']
if processFiles['toRegister']:
log.error("Some files could not be registered in FC:", len(processFiles['toRegister']))
ftsFilesDict = self.updateFTSFileDict(ftsFilesDict, processFiles)
# # send accounting record for this job
self.__sendAccounting(ftsJob, request.OwnerDN)
# # update placement - remove this job from placement
route = self.__ftsPlacement.findRoute(ftsJob.SourceSE, ftsJob.TargetSE)
if route["OK"]:
try:
self.updateLock().acquire()
self.__ftsPlacement.finishTransferOnRoute(route['Value'])
finally:
self.updateLock().release()
log.info("FTSJob is finalized")
return S_OK(ftsFilesDict)
def __checkFailed(self, ftsFile):
reschedule = False
submit = False
fail = False
if ftsFile.Status in ("Failed", 'Canceled'):
if ftsFile.Error == "MissingSource":
reschedule = True
else:
submit = bool(ftsFile.Attempt < self.MAX_ATTEMPT)
fail = not submit
return reschedule, submit, fail
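  # Hedged decision summary of __checkFailed above:
  #   Error == "MissingSource"             -> reschedule the file
  #   otherwise, Attempt < MAX_ATTEMPT     -> submit it again
  #   otherwise                            -> fail it permanently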
def __filterFiles(self, ftsJob):
""" process ftsFiles from finished ftsJob
:param FTSJob ftsJob: monitored FTSJob instance
"""
# # lists for different categories
toUpdate = []
toReschedule = []
toRegister = []
toSubmit = []
toFail = []
# # loop over files in fts job
for ftsFile in ftsJob:
# # successful files
if ftsFile.Status == "Finished":
if ftsFile.Error == "AddCatalogReplicaFailed":
toRegister.append(ftsFile)
toUpdate.append(ftsFile)
continue
reschedule, submit, fail = self.__checkFailed(ftsFile)
if reschedule:
toReschedule.append(ftsFile)
elif submit:
toSubmit.append(ftsFile)
elif fail:
toFail.append(ftsFile)
return S_OK({"toUpdate": toUpdate,
"toSubmit": toSubmit,
"toRegister": toRegister,
"toReschedule": toReschedule,
"toFail": toFail})
def __insertRegisterOperation(self, request, operation, toRegister):
""" add RegisterReplica operation
:param Request request: request instance
:param Operation transferOp: 'ReplicateAndRegister' operation for this FTSJob
:param list toRegister: [ FTSDB.FTSFile, ... ] - files that failed to register
"""
log = self.log.getSubLogger("req_%s/%s/registerFiles" % (request.RequestID, request.RequestName))
byTarget = {}
for ftsFile in toRegister:
if ftsFile.TargetSE not in byTarget:
byTarget.setdefault(ftsFile.TargetSE, [])
byTarget[ftsFile.TargetSE].append(ftsFile)
log.info("will create %s 'RegisterReplica' operations" % len(byTarget))
for target, ftsFileList in byTarget.iteritems():
log.info("creating 'RegisterReplica' operation for targetSE %s with %s files..." % (target,
len(ftsFileList)))
registerOperation = Operation()
registerOperation.Type = "RegisterReplica"
registerOperation.Status = "Waiting"
registerOperation.TargetSE = target
targetSE = StorageElement(target)
for ftsFile in ftsFileList:
opFile = File()
opFile.LFN = ftsFile.LFN
pfn = returnSingleResult(targetSE.getURL(ftsFile.LFN, protocol=self.registrationProtocols))
if not pfn["OK"]:
continue
opFile.PFN = pfn["Value"]
registerOperation.addFile(opFile)
request.insertBefore(registerOperation, operation)
return S_OK()
@staticmethod
def __sendAccounting(ftsJob, ownerDN):
""" prepare and send DataOperation to AccouringDB """
dataOp = DataOperation()
dataOp.setStartTime(fromString(ftsJob.SubmitTime))
dataOp.setEndTime(fromString(ftsJob.LastUpdate))
accountingDict = dict()
accountingDict["OperationType"] = "ReplicateAndRegister"
username = getUsernameForDN(ownerDN)
if not username["OK"]:
username = ownerDN
else:
username = username["Value"]
accountingDict["User"] = username
accountingDict["Protocol"] = "FTS3" if 'fts3' in ftsJob.FTSServer.lower() else 'FTS'
accountingDict['ExecutionSite'] = ftsJob.FTSServer
accountingDict['RegistrationTime'] = ftsJob._regTime # pylint: disable=protected-access
accountingDict['RegistrationOK'] = ftsJob._regSuccess # pylint: disable=protected-access
accountingDict['RegistrationTotal'] = ftsJob._regTotal # pylint: disable=protected-access
accountingDict["TransferOK"] = len([f for f in ftsJob if f.Status in FTSFile.SUCCESS_STATES])
accountingDict["TransferTotal"] = len(ftsJob)
accountingDict["TransferSize"] = ftsJob.Size - ftsJob.FailedSize
accountingDict["FinalStatus"] = ftsJob.Status
accountingDict["Source"] = ftsJob.SourceSE
accountingDict["Destination"] = ftsJob.TargetSE
accountingDict['TransferTime'] = sum(int(f._duration) for f in ftsJob # pylint: disable=protected-access
if f.Status in FTSFile.SUCCESS_STATES)
dataOp.setValuesFromDict(accountingDict)
dataOp.commit()
def __checkReadyReplicas(self, request, operation):
""" check ready replicas for transferOperation """
log = self.log.getSubLogger("req_%s/%s/checkReadyReplicas" % (request.RequestID, request.RequestName))
targetSESet = set(operation.targetSEList)
# # { LFN: [ targetSE, ... ] }
missingReplicas = {}
scheduledFiles = dict((opFile.LFN, opFile) for opFile in operation if opFile.Status in ("Scheduled", "Waiting"))
# # get replicas
replicas = FileCatalog().getReplicas(scheduledFiles.keys())
if not replicas["OK"]:
self.log.error(replicas["Message"])
return replicas
replicas = replicas["Value"]
fullyReplicated = 0
missingSEs = {}
for successfulLFN in replicas["Successful"]:
reps = set(replicas['Successful'][successfulLFN])
if targetSESet.issubset(reps):
log.verbose("%s has been replicated to all targets" % successfulLFN)
fullyReplicated += 1
scheduledFiles[successfulLFN].Status = "Done"
else:
missingReplicas[successfulLFN] = sorted(targetSESet - reps)
ses = ",".join(missingReplicas[successfulLFN])
missingSEs[ses] = missingSEs.setdefault(ses, 0) + 1
log.verbose("%s is still missing at %s" % (successfulLFN, ses))
if fullyReplicated:
log.info("%d new files have been replicated to all targets" % fullyReplicated)
if missingSEs:
for ses in missingSEs:
log.info("%d replicas still missing at %s" % (missingSEs[ses], ses))
reMissing = re.compile("no such file or directory")
for failedLFN, errStr in replicas["Failed"].iteritems():
scheduledFiles[failedLFN].Error = errStr
if reMissing.search(errStr.lower()):
log.error("%s is missing, setting its status to 'Failed'" % failedLFN)
scheduledFiles[failedLFN].Status = "Failed"
else:
log.warn("unable to read replicas for %s: %s" % (failedLFN, errStr))
return S_OK(missingReplicas)
def __filterReplicas(self, opFile):
""" filter out banned/invalid source SEs """
from DIRAC.DataManagementSystem.Agent.RequestOperations.ReplicateAndRegister import filterReplicas
return filterReplicas(opFile, logger=self.log, dataManager=self.dataManager)
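# Hedged configuration sketch: the option names below are the ones read in
# initialize() above and the values are the class defaults; the section path is
# an assumption and not a definitive DIRAC configuration.
#
#     Agents
#     {
#       FTSAgent
#       {
#         PollingTime = 60
#         MinThreads = 1
#         MaxThreads = 10
#         MaxRequests = 100
#         MaxActiveJobsPerRoute = 50
#         MaxFilesPerJob = 100
#         MaxTransferAttempts = 256
#         MonitoringInterval = 600
#         PinTime = 0
#       }
#     }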
|
arrabito/DIRAC
|
DataManagementSystem/Agent/FTSAgent.py
|
Python
|
gpl-3.0
| 51,312
|
[
"DIRAC"
] |
108b2c974607949bb3d5f5dd6fb7600777502824f5455a8fc3148fb1a71cf02e
|
from galaxy.jobs import runners
def test_default_specs():
# recheck_missing_job_retries is integer >= 0
params = runners.RunnerParams( specs=runners.BaseJobRunner.DEFAULT_SPECS, params=dict( recheck_missing_job_retries="1" ) )
assert params.recheck_missing_job_retries == 1
assert params["recheck_missing_job_retries"] == 1
exception_raised = False
try:
runners.RunnerParams( specs=runners.BaseJobRunner.DEFAULT_SPECS, params=dict( recheck_missing_job_retries=-1 ) )
except Exception:
exception_raised = True
assert exception_raised
def test_missing_parameter():
exception = None
try:
runners.RunnerParams( specs={}, params=dict( foo="bar" ) )
except Exception as e:
exception = e
assert exception.message == runners.JOB_RUNNER_PARAMETER_UNKNOWN_MESSAGE % "foo"
def test_invalid_parameter():
exception = None
try:
runners.RunnerParams( specs=dict( foo=dict( valid=lambda x: x != "bar", default="baz" ) ), params=dict( foo="bar" ) )
except Exception as e:
exception = e
assert exception.message == runners.JOB_RUNNER_PARAMETER_VALIDATION_FAILED_MESSAGE % "foo"
def test_map_problem():
exception = None
try:
runners.RunnerParams( specs=dict( foo=dict( map=lambda x: 1 / 0, default="baz" ) ), params=dict( foo="bar" ) )
except Exception as e:
exception = e
assert exception.message == runners.JOB_RUNNER_PARAMETER_MAP_PROBLEM_MESSAGE % ( "foo", "bar" )
def test_param_default():
runner_params = runners.RunnerParams( specs=dict( foo=dict( default="baz" ) ), params={} )
assert runner_params["foo"] == "baz"
assert runner_params.foo == "baz"
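# Hedged illustration: the tests above suggest a spec may combine 'default',
# 'valid' and 'map'; 'retries' is a hypothetical parameter name used only here.
#
#     specs = dict(retries=dict(default="0", valid=lambda v: int(v) >= 0, map=int))
#     params = runners.RunnerParams(specs=specs, params=dict(retries="3"))
#     assert params.retries == 3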
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/test/unit/jobs/test_runner_params.py
|
Python
|
gpl-3.0
| 1,705
|
[
"Galaxy"
] |
f6a18f8aa5b5fb4927035fd878aeb8b12fb007cbee845aa744c6cb6b5c2ec4e8
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''Wave Function Stability Analysis
Ref.
JCP, 66, 3045 (1977); DOI:10.1063/1.434318
JCP 104, 9047 (1996); DOI:10.1063/1.471637
'''
import numpy
import scipy
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
def rhf_stability(mf, internal=True, external=False, verbose=None):
if internal:
rhf_internal(mf, verbose)
if external:
rhf_external(mf, verbose)
def uhf_stability(mf, internal=True, external=False, verbose=None):
if internal:
uhf_internal(mf, verbose)
if external:
uhf_external(mf, verbose)
def rhf_internal(mf, verbose=None):
log = logger.new_logger(mf, verbose)
mol = mf.mol
mo_coeff = mf.mo_coeff
mo_energy = mf.mo_energy
mo_occ = mf.mo_occ
nmo = mo_coeff.shape[1]
nocc = numpy.count_nonzero(mo_occ)
nvir = nmo - nocc
eri_mo = ao2mo.full(mol, mo_coeff)
eri_mo = ao2mo.restore(1, eri_mo, nmo)
eai = lib.direct_sum('a-i->ai', mo_energy[nocc:], mo_energy[:nocc])
# A
h = numpy.einsum('ckld->kcld', eri_mo[nocc:,:nocc,:nocc,nocc:]) * 2
h-= numpy.einsum('cdlk->kcld', eri_mo[nocc:,nocc:,:nocc,:nocc])
for a in range(nvir):
for i in range(nocc):
h[i,a,i,a] += eai[a,i]
# B
h+= numpy.einsum('ckdl->kcld', eri_mo[nocc:,:nocc,nocc:,:nocc]) * 2
h-= numpy.einsum('cldk->kcld', eri_mo[nocc:,:nocc,nocc:,:nocc])
nov = nocc * nvir
e = scipy.linalg.eigh(h.reshape(nov,nov))[0]
log.debug('rhf_internal: lowest eigs = %s', e[e<=max(e[0],1e-5)])
if e[0] < -1e-5:
log.log('RHF wavefunction has an internal instability')
else:
log.log('RHF wavefunction is stable in the internal stability analysis')
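# Hedged note on the matrix assembled above: with real orbitals (cf. the
# references in the module docstring) it corresponds to A + B, where
#   A[ia,jb] = (e_a - e_i) * d_ij * d_ab + 2*(ia|jb) - (ij|ab)
#   B[ia,jb] = 2*(ia|jb) - (ib|ja)
# and a negative lowest eigenvalue of A + B signals an internal (singlet)
# instability of the RHF solution.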
def rhf_external(mf, verbose=None):
log = logger.new_logger(mf, verbose)
mol = mf.mol
mo_coeff = mf.mo_coeff
mo_energy = mf.mo_energy
mo_occ = mf.mo_occ
nmo = mo_coeff.shape[1]
nocc = numpy.count_nonzero(mo_occ)
nvir = nmo - nocc
nov = nocc * nvir
eri_mo = ao2mo.full(mol, mo_coeff)
eri_mo = ao2mo.restore(1, eri_mo, nmo)
eai = lib.direct_sum('a-i->ai', mo_energy[nocc:], mo_energy[:nocc])
# A
h = numpy.einsum('ckld->kcld', eri_mo[nocc:,:nocc,:nocc,nocc:]) * 2
h-= numpy.einsum('cdlk->kcld', eri_mo[nocc:,nocc:,:nocc,:nocc])
for a in range(nvir):
for i in range(nocc):
h[i,a,i,a] += eai[a,i]
# B
h-= numpy.einsum('ckdl->kcld', eri_mo[nocc:,:nocc,nocc:,:nocc]) * 2
h+= numpy.einsum('cldk->kcld', eri_mo[nocc:,:nocc,nocc:,:nocc])
e1 = scipy.linalg.eigh(h.reshape(nov,nov))[0]
log.debug('rhf_external: lowest eigs = %s', e1[e1<=max(e1[0],1e-5)])
if e1[0] < -1e-5:
log.log('RHF wavefunction has an RHF real -> complex instability')
else:
log.log('RHF wavefunction is stable in the RHF real -> complex stability analysis')
h =-numpy.einsum('cdlk->kcld', eri_mo[nocc:,nocc:,:nocc,:nocc])
for a in range(nvir):
for i in range(nocc):
h[i,a,i,a] += eai[a,i]
h-= numpy.einsum('cldk->kcld', eri_mo[nocc:,:nocc,nocc:,:nocc])
e3 = scipy.linalg.eigh(h.reshape(nov,nov))[0]
log.debug('rhf_external: lowest eigs of H = %s', e3[e3<=max(e3[0],1e-5)])
if e3[0] < -1e-5:
log.log('RHF wavefunction has an RHF -> UHF instability.')
else:
log.log('RHF wavefunction is stable in the RHF -> UHF stability analysis')
def uhf_internal(mf, verbose=None):
log = logger.new_logger(mf, verbose)
mol = mf.mol
mo_a, mo_b = mf.mo_coeff
mo_ea, mo_eb = mf.mo_energy
mo_occa, mo_occb = mf.mo_occ
nmo = mo_a.shape[1]
nocca = numpy.count_nonzero(mo_occa)
noccb = numpy.count_nonzero(mo_occb)
nvira = nmo - nocca
nvirb = nmo - noccb
eri_aa = ao2mo.restore(1, ao2mo.full(mol, mo_a), nmo)
eri_ab = ao2mo.restore(1, ao2mo.general(mol, [mo_a,mo_a,mo_b,mo_b]), nmo)
eri_bb = ao2mo.restore(1, ao2mo.full(mol, mo_b), nmo)
# alpha -> alpha
haa = numpy.einsum('aijb->iajb', eri_aa[nocca:,:nocca,:nocca,nocca:]) * 2
haa-= numpy.einsum('abji->iajb', eri_aa[nocca:,nocca:,:nocca,:nocca])
haa-= numpy.einsum('ajbi->iajb', eri_aa[nocca:,:nocca,nocca:,:nocca])
for a in range(nvira):
for i in range(nocca):
haa[i,a,i,a] += mo_ea[nocca+a] - mo_ea[i]
# beta -> beta
hbb = numpy.einsum('aijb->iajb', eri_bb[noccb:,:noccb,:noccb,noccb:]) * 2
hbb-= numpy.einsum('abji->iajb', eri_bb[noccb:,noccb:,:noccb,:noccb])
hbb-= numpy.einsum('ajbi->iajb', eri_bb[noccb:,:noccb,noccb:,:noccb])
for a in range(nvirb):
for i in range(noccb):
hbb[i,a,i,a] += mo_eb[noccb+a] - mo_eb[i]
# (alpha -> alpha, beta -> beta)
hab = numpy.einsum('aijb->iajb', eri_ab[nocca:,:nocca,:noccb,noccb:]) * 2
nova = nocca * nvira
novb = noccb * nvirb
hall = numpy.empty((nova+novb,nova+novb))
hall[:nova,:nova] = haa.reshape(nova,nova)
hall[nova:,nova:] = hbb.reshape(novb,novb)
hall[:nova,nova:] = hab.reshape(nova,novb)
hall[nova:,:nova] = hab.reshape(nova,novb).T
e = scipy.linalg.eigh(hall)[0]
log.debug('uhf_internal: lowest eigs of H = %s', e[e<=max(e[0],1e-5)])
if e[0] < -1e-5:
log.log('UHF wavefunction has an internal instability. '
'It may correspond to a (spatial) symmetry-broken wavefunction.')
else:
log.log('UHF wavefunction is stable in the internal stability analysis')
def uhf_external(mf, verbose=None):
log = logger.new_logger(mf, verbose)
mol = mf.mol
mo_a, mo_b = mf.mo_coeff
mo_ea, mo_eb = mf.mo_energy
mo_occa, mo_occb = mf.mo_occ
nmo = mo_a.shape[1]
nocca = numpy.count_nonzero(mo_occa)
noccb = numpy.count_nonzero(mo_occb)
nvira = nmo - nocca
nvirb = nmo - noccb
eri_aa = ao2mo.restore(1, ao2mo.full(mol, mo_a), nmo)
eri_ab = ao2mo.restore(1, ao2mo.general(mol, [mo_a,mo_a,mo_b,mo_b]), nmo)
eri_bb = ao2mo.restore(1, ao2mo.full(mol, mo_b), nmo)
# alpha -> alpha
haa =-numpy.einsum('abji->iajb', eri_aa[nocca:,nocca:,:nocca,:nocca])
haa+= numpy.einsum('ajbi->iajb', eri_aa[nocca:,:nocca,nocca:,:nocca])
for a in range(nvira):
for i in range(nocca):
haa[i,a,i,a] += mo_ea[nocca+a] - mo_ea[i]
# beta -> beta
hbb =-numpy.einsum('abji->iajb', eri_bb[noccb:,noccb:,:noccb,:noccb])
hbb+= numpy.einsum('ajbi->iajb', eri_bb[noccb:,:noccb,noccb:,:noccb])
for a in range(nvirb):
for i in range(noccb):
hbb[i,a,i,a] += mo_eb[noccb+a] - mo_eb[i]
nova = nocca * nvira
novb = noccb * nvirb
hall = numpy.zeros((nova+novb,nova+novb))
hall[:nova,:nova] = haa.reshape(nova,nova)
hall[nova:,nova:] = hbb.reshape(novb,novb)
e1 = scipy.linalg.eigh(hall)[0]
log.debug('uhf_external: lowest eigs of H = %s', e1[e1<=max(e1[0],1e-5)])
if e1[0] < -1e-5:
log.log('UHF wavefunction has a UHF real -> complex instability')
else:
log.log('UHF wavefunction is stable in the UHF real -> complex stability analysis')
h11 =-numpy.einsum('abji->iajb', eri_ab[nocca:,nocca:,:noccb,:noccb])
for a in range(nvira):
for i in range(noccb):
h11[i,a,i,a] += mo_ea[nocca+a] - mo_eb[i]
h22 =-numpy.einsum('jiab->iajb', eri_ab[:nocca,:nocca,noccb:,noccb:])
for a in range(nvirb):
for i in range(nocca):
h22[i,a,i,a] += mo_eb[noccb+a] - mo_ea[i]
h12 =-numpy.einsum('ajbi->iajb', eri_ab[nocca:,:nocca,noccb:,:noccb])
h21 =-numpy.einsum('biaj->iajb', eri_ab[nocca:,:nocca,noccb:,:noccb])
n1 = noccb * nvira
n2 = nocca * nvirb
hall = numpy.empty((n1+n2,n1+n2))
hall[:n1,:n1] = h11.reshape(n1,n1)
hall[n1:,n1:] = h22.reshape(n2,n2)
hall[:n1,n1:] = h12.reshape(n1,n2)
hall[n1:,:n1] = h21.reshape(n2,n1)
e3 = scipy.linalg.eigh(hall)[0]
log.debug('uhf_external: lowest eigs of H = %s', e3[e3<=max(e3[0],1e-5)])
if e3[0] < -1e-5:
log.log('UHF wavefunction has a UHF -> GHF instability.')
else:
log.log('UHF wavefunction is stable in the UHF -> GHF stability analysis')
if __name__ == '__main__':
from pyscf import gto, scf
mol = gto.M(atom='O 0 0 0; O 0 0 1.2222', basis='631g*')
mf = scf.RHF(mol).run()
rhf_stability(mf, True, True, verbose=5)
mf = scf.UHF(mol).run()
uhf_stability(mf, not True, True, verbose=5)
mol.spin = 2
mf = scf.UHF(mol).run()
uhf_stability(mf, True, True, verbose=5)
mol = gto.M(atom='''
O1
O2 1 1.2227
O3 1 1.2227 2 114.0451
''', basis = '631g*')
mf = scf.RHF(mol).run()
rhf_stability(mf, True, True, verbose=5)
mf = scf.UHF(mol).run()
uhf_stability(mf, True, True, verbose=5)
|
sunqm/pyscf
|
pyscf/scf/stability_slow.py
|
Python
|
apache-2.0
| 9,365
|
[
"PySCF"
] |
5793d1934aaf9912bf82f85f9b0987b3f3fc1be9c024c1ab9ad7f7128455134e
|
#! /usr/bin/python
###### SUM STATES #######
# Python script for summing and plotting the data from the Density Of States
# files obtained from projwfc.x. It can also sum k-resolved DOS and make a plot
# with matplotlib (if not available, gnuplot; if that is not available either,
# print to a file). If there is no X11 forwarding, it plots in the terminal.
# It does something very similar to sumpdos.f90, but with
# some extra features (use the "-h" option).
#
# It takes two different inputs. The first one is the pw.x output
# ("-o" option), which is used for parsing the Fermi energy for shifting
# the PDOS curve to the right energy. The other files are the pDOS files
# ("-s" option), which can be given with shell syntax, e.g.
# pdos_atm*Fe*wfc*d* for summing all the d orbitals of Fe.
# It can also handle k-resolved DOS files.
#
# One of the most useful features, compared to sumpdos.x, is the
# fact that it also builds the picture directly, so it can be directly
# visualized and exported for inclusion in a document.
# It uses matplotlib for plotting, but if no matplotlib is found in
# the $PYTHONPATH, it tries to use gnuplot; if no gnuplot is available,
# it dumps the output data to a file.
# In the case that no X11 forwarding is available (e.g. ssh to a cluster),
# it shows a rough graph in the terminal, to give an idea of the shape
# of the results.
#
# Example of usage:
# cd ....../espresso-5.0/PP/examples/example02/results/
# ../../../src/sum_states.py -o ni.dos.out -s
# ni.pdos_atm#1\(Ni\)_wfc#2\(d\) -t "Example PP/02" -xr -6 2
#
#
# The procedure for obtaining the DOS files is explained
# e.g. in (espresso-dir)/PP/examples/example02/
#
# Author: Dr. Julen Larrucea
# University of Bremen,
# Bremen Centre for Computational Materials Science, HMI Group
# julenl [at] gmail.com or larrucea [at] hmi.uni-bremen.de
#
# This file is distributed under the terms of the GNU General Public
# License. See the file `License'
# in the root directory of the present distribution,
# or http://www.gnu.org/copyleft/gpl.txt .
#######################
import sys
import os
import fnmatch
import linecache
# Some default variables
version=0.2
pwout=""
selat="*"
graphtitle=""
min_x,max_x=-10,3
min_y,max_y="",""
output_file_name="sum_dos.out"
prt="no"
print " #### sum_states.py version "+str(version)+" #### "
# Check whether gnuplot is available (X11 and matplotlib are checked further below)
try:
os.popen("gnuplot -V").read()
prog_gnuplot="yes" # gnuplot is installed
except:
prog_gnuplot="no"
# Parse command line options
if len(sys.argv)>1:
for i in sys.argv:
if i.startswith('-'):
option=i.split('-')[1]
if option=="o":
pwout= sys.argv[sys.argv.index('-o')+1]
if option=="s":
selat= sys.argv[sys.argv.index('-s')+1]
if option=="p":
prt="yes"
if len(sys.argv) > sys.argv.index('-p')+1: # if there is a name after "-p" take it as an output name
if sys.argv[sys.argv.index('-p')+1] != "-": # otherwise default name sum_dos.out
output_file_name=sys.argv[sys.argv.index('-p')+1]
if option=="t":
graphtitle= sys.argv[sys.argv.index('-t')+1]
if option=="xr":
min_x,max_x= float(sys.argv[sys.argv.index('-xr')+1]),float(sys.argv[sys.argv.index('-xr')+2])
if option=="yr":
min_y,max_y= float(sys.argv[sys.argv.index('-yr')+1]),float(sys.argv[sys.argv.index('-yr')+2])
if option=="v":
print "sum_dos.py version: "+version
sys.exit()
if option=="h":
print '''
-o QE output file name (for grepping Fermi E)
-s Selection of atoms for summing the DOSes. "*" for all, *1*Fe*d* for first Fe atom " (def. "*")
-p Print output to a file and additionally provide an output name (def. no output and "sum_dos.out")
-t set title in the head of the graph
-xr set min and max x value for the axes in the graph
-yr set min and max y value for the axes in the graph
-h print this help
-v print version
Example: sum_states.py -s sys.pdos_atm#4\(Fe2\)_wfc#2\(d\) -t "Wustite LDA+U single Fe" -xr -9 4
'''
sys.exit()
# Check for matplotlib/gnuplot and import matplotlib if possible
if len(os.popen('echo $DISPLAY').read()) > 1:
graphic_plot="yes"
try:
from pylab import *
mplplot="yes"
print "pylab imported"
except:
print "There is no mathplotlib installed. Using gnuplot."
mplplot="no"
prt="yes"
else:
print "No X11. Trying to plot on terminal"
graphic_plot="no"
if prog_gnuplot=="no":
prt="yes"
# if not specified, try to find the espresso output, in order to parse the Fermi energy
if pwout == "":
for filen in filter(os.path.isfile, os.listdir('.')):
if "Program PWSCF" in linecache.getline(filen, 2):
print "Using " + filen + " as pw.x output. You can specify another one with the -o option."
pwout=filen
# Parse Fermi energy from the pw.x output
if pwout!="":
try:
os.popen("grep -a 'the Fermi energy is' "+pwout ).read()
fermi=float(os.popen("grep -a 'the Fermi energy is' "+pwout ).read().split()[4])
print "Fermi energy = ", fermi, "a.u."
except:
print "WARNING: No Fermi energy found. Using 0 e.V. instead"
fermi=0
else:
print "WARNING: No pw.x output found. Using E Fermi = 0 e.V."
fermi=0
# List of all DOS files to add
dosfiles=[]
for dfile in os.listdir('.'):
if fnmatch.fnmatch(dfile, selat):
dosfiles.append(dfile)
if len(dosfiles)==0:
print "ERROR: Provide a (list of) valid DOS file(s)"
sys.exit()
print "dosfiles list: ",
for dosfile in dosfiles:
print dosfile,
print ""
# Check whether we have k-solved DOS
if open(dosfiles[0],'r').readline().split()[1]=="E":
ksolved="no"
print "no ksolved"
elif open(dosfiles[0],'r').readline().split()[1]=="ik":
ksolved="yes"
print "ksolved"
# Sum over all k-points and files
mat=[] # matrix with total sum of ldos
for i in range(len(dosfiles)):
mati=[] # temporal matrix for each DOS file "i"
k=0
for line in open(dosfiles[i],'r'):
if len(line) > 10 and line.split()[0] != "#":
if ksolved=="no":
mati.append([float(line.split()[0]),float(line.split()[1]),float(line.split()[2])])
if ksolved=="yes":
ik = int(line.split()[0])
if ik > k: #if it is a different k block
k=int(line.split()[0])
oldmat=[] # temporal matrix for each k-point
if ik == 1:
mati.append([float(line.split()[1]),float(line.split()[2]),float(line.split()[3])]) # append: energy, ldosup, ldosdw
elif ik == k and k > 1:
oldmat.append([float(line.split()[1]),float(line.split()[2]),float(line.split()[3])])
elif len(line) < 5 and k > 1: #if blank line, sum k-frame to the total
for j in range(len(oldmat)):
mati[j]=[mati[j][0],mati[j][1]+oldmat[j][1],mati[j][2]+oldmat[j][2]]
if mat == []: # if it is the first dos file, copy total matrix (mat) = the first dos files's data
mat=mati[:]
else:
for j in range(len(mati)): # if it is not the first file, sum values
mat[j]=[mat[j][0],mat[j][1]+mati[j][1],mat[j][2]+mati[j][2]]
print "...ploting..."
if prt=="yes":
out=open(output_file_name,"w")
x,y1,y2=[],[],[]
for i in mat:
x.append(i[0]-fermi)
y1.append(i[1])
y2.append(-i[2])
if prt=="yes": # print to a file
print>>out, i[0]-fermi, i[1], i[2]
if prt=="yes":
out.close()
if graphic_plot=="yes":
# if there is matplotlib, generate a plot with it
if mplplot=="yes":
plot(x,y1,linewidth=1.0)
plot(x,y2,linewidth=1.0)
print min(y2),max(y1)
plt.title(graphtitle)
plt.xlabel('E (eV)')
plt.ylabel('States')
plt.grid(True)
plt.rcParams.update({'font.size': 22})
plt.fill(x,y1,color='0.8')
plt.fill(x,y2,color='0.9')
if min_x and max_x:
fromx,tox=min_x,max_x
plt.axis([fromx, tox, min(y2), max(y1)])
show()
elif mplplot=="no" and prog_gnuplot=="yes": # If no mathplotlib available, use gnuplot
os.system("echo \"plot '"+ output_file_name + "' using ($1-"+str(fermi)+"):2 w l, '' u ($1"+str(fermi)+"):3 w l\" | gnuplot -persist")
elif graphic_plot=="no": # If no X forwarding available, show graph in terminal
if prog_gnuplot=="yes":
os.system("echo \"set terminal dumb; plot '"+ output_file_name + "' using ($1-"+str(fermi)+"):2 w l, '' u ($1-"+str(fermi)+"):3 w l\" | gnuplot -persist")
|
qiaojunfeng/q-e
|
PP/tools/sum_states.py
|
Python
|
gpl-2.0
| 8,217
|
[
"ESPResSo"
] |
1fb69c2d28f88dd6ad36ef5d9186a4f63446f773ec182091f0f89922938331cd
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
import io
import math
import os
import copy
from math import pi, cos, sin, tan, sqrt
from mathutils import Vector, Matrix
from copy import copy
# -----------------------------------------------------------------------------
# Atom, stick and element data
# This is a list that contains some data of all possible elements. The structure
# is as follows:
#
# 1, "Hydrogen", "H", [0.0,0.0,1.0], 0.32, 0.32, 0.32 , -1 , 1.54 means
#
# No., name, short name, color, radius (used), radius (covalent), radius (atomic),
#
# charge state 1, radius (ionic) 1, charge state 2, radius (ionic) 2, ... all
# charge states for any atom are listed, if existing.
# The list is fixed and cannot be changed ... (see below)
ATOM_CLUSTER_ELEMENTS_DEFAULT = (
( 1, "Hydrogen", "H", ( 1.0, 1.0, 1.0), 0.32, 0.32, 0.79 , -1 , 1.54 ),
( 2, "Helium", "He", ( 0.85, 1.0, 1.0), 0.93, 0.93, 0.49 ),
( 3, "Lithium", "Li", ( 0.8, 0.50, 1.0), 1.23, 1.23, 2.05 , 1 , 0.68 ),
( 4, "Beryllium", "Be", ( 0.76, 1.0, 0.0), 0.90, 0.90, 1.40 , 1 , 0.44 , 2 , 0.35 ),
( 5, "Boron", "B", ( 1.0, 0.70, 0.70), 0.82, 0.82, 1.17 , 1 , 0.35 , 3 , 0.23 ),
( 6, "Carbon", "C", ( 0.56, 0.56, 0.56), 0.77, 0.77, 0.91 , -4 , 2.60 , 4 , 0.16 ),
( 7, "Nitrogen", "N", ( 0.18, 0.31, 0.97), 0.75, 0.75, 0.75 , -3 , 1.71 , 1 , 0.25 , 3 , 0.16 , 5 , 0.13 ),
( 8, "Oxygen", "O", ( 1.0, 0.05, 0.05), 0.73, 0.73, 0.65 , -2 , 1.32 , -1 , 1.76 , 1 , 0.22 , 6 , 0.09 ),
( 9, "Fluorine", "F", ( 0.56, 0.87, 0.31), 0.72, 0.72, 0.57 , -1 , 1.33 , 7 , 0.08 ),
(10, "Neon", "Ne", ( 0.70, 0.89, 0.96), 0.71, 0.71, 0.51 , 1 , 1.12 ),
(11, "Sodium", "Na", ( 0.67, 0.36, 0.94), 1.54, 1.54, 2.23 , 1 , 0.97 ),
(12, "Magnesium", "Mg", ( 0.54, 1.0, 0.0), 1.36, 1.36, 1.72 , 1 , 0.82 , 2 , 0.66 ),
(13, "Aluminium", "Al", ( 0.74, 0.65, 0.65), 1.18, 1.18, 1.82 , 3 , 0.51 ),
(14, "Silicon", "Si", ( 0.94, 0.78, 0.62), 1.11, 1.11, 1.46 , -4 , 2.71 , -1 , 3.84 , 1 , 0.65 , 4 , 0.42 ),
(15, "Phosphorus", "P", ( 1.0, 0.50, 0.0), 1.06, 1.06, 1.23 , -3 , 2.12 , 3 , 0.44 , 5 , 0.35 ),
(16, "Sulfur", "S", ( 1.0, 1.0, 0.18), 1.02, 1.02, 1.09 , -2 , 1.84 , 2 , 2.19 , 4 , 0.37 , 6 , 0.30 ),
(17, "Chlorine", "Cl", ( 0.12, 0.94, 0.12), 0.99, 0.99, 0.97 , -1 , 1.81 , 5 , 0.34 , 7 , 0.27 ),
(18, "Argon", "Ar", ( 0.50, 0.81, 0.89), 0.98, 0.98, 0.88 , 1 , 1.54 ),
(19, "Potassium", "K", ( 0.56, 0.25, 0.83), 2.03, 2.03, 2.77 , 1 , 0.81 ),
(20, "Calcium", "Ca", ( 0.23, 1.0, 0.0), 1.74, 1.74, 2.23 , 1 , 1.18 , 2 , 0.99 ),
(21, "Scandium", "Sc", ( 0.90, 0.90, 0.90), 1.44, 1.44, 2.09 , 3 , 0.73 ),
(22, "Titanium", "Ti", ( 0.74, 0.76, 0.78), 1.32, 1.32, 2.00 , 1 , 0.96 , 2 , 0.94 , 3 , 0.76 , 4 , 0.68 ),
(23, "Vanadium", "V", ( 0.65, 0.65, 0.67), 1.22, 1.22, 1.92 , 2 , 0.88 , 3 , 0.74 , 4 , 0.63 , 5 , 0.59 ),
(24, "Chromium", "Cr", ( 0.54, 0.6, 0.78), 1.18, 1.18, 1.85 , 1 , 0.81 , 2 , 0.89 , 3 , 0.63 , 6 , 0.52 ),
(25, "Manganese", "Mn", ( 0.61, 0.47, 0.78), 1.17, 1.17, 1.79 , 2 , 0.80 , 3 , 0.66 , 4 , 0.60 , 7 , 0.46 ),
(26, "Iron", "Fe", ( 0.87, 0.4, 0.2), 1.17, 1.17, 1.72 , 2 , 0.74 , 3 , 0.64 ),
(27, "Cobalt", "Co", ( 0.94, 0.56, 0.62), 1.16, 1.16, 1.67 , 2 , 0.72 , 3 , 0.63 ),
(28, "Nickel", "Ni", ( 0.31, 0.81, 0.31), 1.15, 1.15, 1.62 , 2 , 0.69 ),
(29, "Copper", "Cu", ( 0.78, 0.50, 0.2), 1.17, 1.17, 1.57 , 1 , 0.96 , 2 , 0.72 ),
(30, "Zinc", "Zn", ( 0.49, 0.50, 0.69), 1.25, 1.25, 1.53 , 1 , 0.88 , 2 , 0.74 ),
(31, "Gallium", "Ga", ( 0.76, 0.56, 0.56), 1.26, 1.26, 1.81 , 1 , 0.81 , 3 , 0.62 ),
(32, "Germanium", "Ge", ( 0.4, 0.56, 0.56), 1.22, 1.22, 1.52 , -4 , 2.72 , 2 , 0.73 , 4 , 0.53 ),
(33, "Arsenic", "As", ( 0.74, 0.50, 0.89), 1.20, 1.20, 1.33 , -3 , 2.22 , 3 , 0.58 , 5 , 0.46 ),
(34, "Selenium", "Se", ( 1.0, 0.63, 0.0), 1.16, 1.16, 1.22 , -2 , 1.91 , -1 , 2.32 , 1 , 0.66 , 4 , 0.50 , 6 , 0.42 ),
(35, "Bromine", "Br", ( 0.65, 0.16, 0.16), 1.14, 1.14, 1.12 , -1 , 1.96 , 5 , 0.47 , 7 , 0.39 ),
(36, "Krypton", "Kr", ( 0.36, 0.72, 0.81), 1.31, 1.31, 1.24 ),
(37, "Rubidium", "Rb", ( 0.43, 0.18, 0.69), 2.16, 2.16, 2.98 , 1 , 1.47 ),
(38, "Strontium", "Sr", ( 0.0, 1.0, 0.0), 1.91, 1.91, 2.45 , 2 , 1.12 ),
(39, "Yttrium", "Y", ( 0.58, 1.0, 1.0), 1.62, 1.62, 2.27 , 3 , 0.89 ),
(40, "Zirconium", "Zr", ( 0.58, 0.87, 0.87), 1.45, 1.45, 2.16 , 1 , 1.09 , 4 , 0.79 ),
(41, "Niobium", "Nb", ( 0.45, 0.76, 0.78), 1.34, 1.34, 2.08 , 1 , 1.00 , 4 , 0.74 , 5 , 0.69 ),
(42, "Molybdenum", "Mo", ( 0.32, 0.70, 0.70), 1.30, 1.30, 2.01 , 1 , 0.93 , 4 , 0.70 , 6 , 0.62 ),
(43, "Technetium", "Tc", ( 0.23, 0.61, 0.61), 1.27, 1.27, 1.95 , 7 , 0.97 ),
(44, "Ruthenium", "Ru", ( 0.14, 0.56, 0.56), 1.25, 1.25, 1.89 , 4 , 0.67 ),
(45, "Rhodium", "Rh", ( 0.03, 0.49, 0.54), 1.25, 1.25, 1.83 , 3 , 0.68 ),
(46, "Palladium", "Pd", ( 0.0, 0.41, 0.52), 1.28, 1.28, 1.79 , 2 , 0.80 , 4 , 0.65 ),
(47, "Silver", "Ag", ( 0.75, 0.75, 0.75), 1.34, 1.34, 1.75 , 1 , 1.26 , 2 , 0.89 ),
(48, "Cadmium", "Cd", ( 1.0, 0.85, 0.56), 1.48, 1.48, 1.71 , 1 , 1.14 , 2 , 0.97 ),
(49, "Indium", "In", ( 0.65, 0.45, 0.45), 1.44, 1.44, 2.00 , 3 , 0.81 ),
(50, "Tin", "Sn", ( 0.4, 0.50, 0.50), 1.41, 1.41, 1.72 , -4 , 2.94 , -1 , 3.70 , 2 , 0.93 , 4 , 0.71 ),
(51, "Antimony", "Sb", ( 0.61, 0.38, 0.70), 1.40, 1.40, 1.53 , -3 , 2.45 , 3 , 0.76 , 5 , 0.62 ),
(52, "Tellurium", "Te", ( 0.83, 0.47, 0.0), 1.36, 1.36, 1.42 , -2 , 2.11 , -1 , 2.50 , 1 , 0.82 , 4 , 0.70 , 6 , 0.56 ),
(53, "Iodine", "I", ( 0.58, 0.0, 0.58), 1.33, 1.33, 1.32 , -1 , 2.20 , 5 , 0.62 , 7 , 0.50 ),
(54, "Xenon", "Xe", ( 0.25, 0.61, 0.69), 1.31, 1.31, 1.24 ),
(55, "Caesium", "Cs", ( 0.34, 0.09, 0.56), 2.35, 2.35, 3.35 , 1 , 1.67 ),
(56, "Barium", "Ba", ( 0.0, 0.78, 0.0), 1.98, 1.98, 2.78 , 1 , 1.53 , 2 , 1.34 ),
(57, "Lanthanum", "La", ( 0.43, 0.83, 1.0), 1.69, 1.69, 2.74 , 1 , 1.39 , 3 , 1.06 ),
(58, "Cerium", "Ce", ( 1.0, 1.0, 0.78), 1.65, 1.65, 2.70 , 1 , 1.27 , 3 , 1.03 , 4 , 0.92 ),
(59, "Praseodymium", "Pr", ( 0.85, 1.0, 0.78), 1.65, 1.65, 2.67 , 3 , 1.01 , 4 , 0.90 ),
(60, "Neodymium", "Nd", ( 0.78, 1.0, 0.78), 1.64, 1.64, 2.64 , 3 , 0.99 ),
(61, "Promethium", "Pm", ( 0.63, 1.0, 0.78), 1.63, 1.63, 2.62 , 3 , 0.97 ),
(62, "Samarium", "Sm", ( 0.56, 1.0, 0.78), 1.62, 1.62, 2.59 , 3 , 0.96 ),
(63, "Europium", "Eu", ( 0.38, 1.0, 0.78), 1.85, 1.85, 2.56 , 2 , 1.09 , 3 , 0.95 ),
(64, "Gadolinium", "Gd", ( 0.27, 1.0, 0.78), 1.61, 1.61, 2.54 , 3 , 0.93 ),
(65, "Terbium", "Tb", ( 0.18, 1.0, 0.78), 1.59, 1.59, 2.51 , 3 , 0.92 , 4 , 0.84 ),
(66, "Dysprosium", "Dy", ( 0.12, 1.0, 0.78), 1.59, 1.59, 2.49 , 3 , 0.90 ),
(67, "Holmium", "Ho", ( 0.0, 1.0, 0.61), 1.58, 1.58, 2.47 , 3 , 0.89 ),
(68, "Erbium", "Er", ( 0.0, 0.90, 0.45), 1.57, 1.57, 2.45 , 3 , 0.88 ),
(69, "Thulium", "Tm", ( 0.0, 0.83, 0.32), 1.56, 1.56, 2.42 , 3 , 0.87 ),
(70, "Ytterbium", "Yb", ( 0.0, 0.74, 0.21), 1.74, 1.74, 2.40 , 2 , 0.93 , 3 , 0.85 ),
(71, "Lutetium", "Lu", ( 0.0, 0.67, 0.14), 1.56, 1.56, 2.25 , 3 , 0.85 ),
(72, "Hafnium", "Hf", ( 0.30, 0.76, 1.0), 1.44, 1.44, 2.16 , 4 , 0.78 ),
(73, "Tantalum", "Ta", ( 0.30, 0.65, 1.0), 1.34, 1.34, 2.09 , 5 , 0.68 ),
(74, "Tungsten", "W", ( 0.12, 0.58, 0.83), 1.30, 1.30, 2.02 , 4 , 0.70 , 6 , 0.62 ),
(75, "Rhenium", "Re", ( 0.14, 0.49, 0.67), 1.28, 1.28, 1.97 , 4 , 0.72 , 7 , 0.56 ),
(76, "Osmium", "Os", ( 0.14, 0.4, 0.58), 1.26, 1.26, 1.92 , 4 , 0.88 , 6 , 0.69 ),
(77, "Iridium", "Ir", ( 0.09, 0.32, 0.52), 1.27, 1.27, 1.87 , 4 , 0.68 ),
(78, "Platinium", "Pt", ( 0.81, 0.81, 0.87), 1.30, 1.30, 1.83 , 2 , 0.80 , 4 , 0.65 ),
(79, "Gold", "Au", ( 1.0, 0.81, 0.13), 1.34, 1.34, 1.79 , 1 , 1.37 , 3 , 0.85 ),
(80, "Mercury", "Hg", ( 0.72, 0.72, 0.81), 1.49, 1.49, 1.76 , 1 , 1.27 , 2 , 1.10 ),
(81, "Thallium", "Tl", ( 0.65, 0.32, 0.30), 1.48, 1.48, 2.08 , 1 , 1.47 , 3 , 0.95 ),
(82, "Lead", "Pb", ( 0.34, 0.34, 0.38), 1.47, 1.47, 1.81 , 2 , 1.20 , 4 , 0.84 ),
(83, "Bismuth", "Bi", ( 0.61, 0.30, 0.70), 1.46, 1.46, 1.63 , 1 , 0.98 , 3 , 0.96 , 5 , 0.74 ),
(84, "Polonium", "Po", ( 0.67, 0.36, 0.0), 1.46, 1.46, 1.53 , 6 , 0.67 ),
(85, "Astatine", "At", ( 0.45, 0.30, 0.27), 1.45, 1.45, 1.43 , -3 , 2.22 , 3 , 0.85 , 5 , 0.46 ),
(86, "Radon", "Rn", ( 0.25, 0.50, 0.58), 1.00, 1.00, 1.34 ),
(87, "Francium", "Fr", ( 0.25, 0.0, 0.4), 1.00, 1.00, 1.00 , 1 , 1.80 ),
(88, "Radium", "Ra", ( 0.0, 0.49, 0.0), 1.00, 1.00, 1.00 , 2 , 1.43 ),
(89, "Actinium", "Ac", ( 0.43, 0.67, 0.98), 1.00, 1.00, 1.00 , 3 , 1.18 ),
(90, "Thorium", "Th", ( 0.0, 0.72, 1.0), 1.65, 1.65, 1.00 , 4 , 1.02 ),
(91, "Protactinium", "Pa", ( 0.0, 0.63, 1.0), 1.00, 1.00, 1.00 , 3 , 1.13 , 4 , 0.98 , 5 , 0.89 ),
(92, "Uranium", "U", ( 0.0, 0.56, 1.0), 1.42, 1.42, 1.00 , 4 , 0.97 , 6 , 0.80 ),
(93, "Neptunium", "Np", ( 0.0, 0.50, 1.0), 1.00, 1.00, 1.00 , 3 , 1.10 , 4 , 0.95 , 7 , 0.71 ),
(94, "Plutonium", "Pu", ( 0.0, 0.41, 1.0), 1.00, 1.00, 1.00 , 3 , 1.08 , 4 , 0.93 ),
(95, "Americium", "Am", ( 0.32, 0.36, 0.94), 1.00, 1.00, 1.00 , 3 , 1.07 , 4 , 0.92 ),
(96, "Curium", "Cm", ( 0.47, 0.36, 0.89), 1.00, 1.00, 1.00 ),
(97, "Berkelium", "Bk", ( 0.54, 0.30, 0.89), 1.00, 1.00, 1.00 ),
(98, "Californium", "Cf", ( 0.63, 0.21, 0.83), 1.00, 1.00, 1.00 ),
(99, "Einsteinium", "Es", ( 0.70, 0.12, 0.83), 1.00, 1.00, 1.00 ),
(100, "Fermium", "Fm", ( 0.70, 0.12, 0.72), 1.00, 1.00, 1.00 ),
(101, "Mendelevium", "Md", ( 0.70, 0.05, 0.65), 1.00, 1.00, 1.00 ),
(102, "Nobelium", "No", ( 0.74, 0.05, 0.52), 1.00, 1.00, 1.00 ),
(103, "Lawrencium", "Lr", ( 0.78, 0.0, 0.4), 1.00, 1.00, 1.00 ),
(104, "Vacancy", "Vac", ( 0.5, 0.5, 0.5), 1.00, 1.00, 1.00),
(105, "Default", "Default", ( 1.0, 1.0, 1.0), 1.00, 1.00, 1.00),
(106, "Stick", "Stick", ( 0.5, 0.5, 0.5), 1.00, 1.00, 1.00),
)
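# A minimal sketch (not part of the original add-on) of how one entry of the
# table above can be unpacked: the first seven fields are fixed, and any
# remaining fields come in (charge state, ionic radius) pairs, as described in
# the comment block above the table.
def _example_unpack_element(entry):
    # Fixed part: number, name, short name, color and the three radii.
    number, name, short_name, color = entry[0], entry[1], entry[2], entry[3]
    radii = [entry[4], entry[5], entry[6]]
    # Optional part: pairs of (charge state, ionic radius), if present.
    radii_ionic = [(entry[i], entry[i + 1]) for i in range(7, len(entry), 2)]
    return number, name, short_name, color, radii, radii_ionic
# Example: _example_unpack_element(ATOM_CLUSTER_ELEMENTS_DEFAULT[7]) yields the
# oxygen entry with ionic radii for the charge states -2, -1, +1 and +6.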
# This list contains all data of the elements and will be used during
# runtime. It is a list of class instances.
# When Atomic Blender is executed, the list is initialized with the fixed
# data from above via the class structure below (CLASS_atom_cluster_Elements). We
# then have one fixed list (above), which is never changed, and a list of
# class instances with the same data. The latter can be modified by loading a
# separate custom data file.
ATOM_CLUSTER_ELEMENTS = []
ATOM_CLUSTER_ALL_ATOMS = []
# This is the class, which stores the properties for one element.
class CLASS_atom_cluster_Elements(object):
__slots__ = ('number', 'name', 'short_name', 'color', 'radii', 'radii_ionic')
def __init__(self, number, name, short_name, color, radii, radii_ionic):
self.number = number
self.name = name
self.short_name = short_name
self.color = color
self.radii = radii
self.radii_ionic = radii_ionic
# This is the class, which stores the properties of one atom.
class CLASS_atom_cluster_atom(object):
__slots__ = ('location')
def __init__(self, location):
self.location = location
# -----------------------------------------------------------------------------
# Read atom data
def DEF_atom_read_atom_data():
del ATOM_CLUSTER_ELEMENTS[:]
for item in ATOM_CLUSTER_ELEMENTS_DEFAULT:
# All three radii into a list
radii = [item[4],item[5],item[6]]
# The handling of the ionic radii will be done later. So far, it is an
# empty list.
radii_ionic = []
li = CLASS_atom_cluster_Elements(item[0],item[1],item[2],item[3],
radii,radii_ionic)
ATOM_CLUSTER_ELEMENTS.append(li)
# -----------------------------------------------------------------------------
# Routines for shapes
def vec_in_sphere(atom_pos,size, skin):
regular = True
inner = True
if atom_pos.length > size/2.0:
regular = False
if atom_pos.length < (size/2.0)*(1-skin):
inner = False
return (regular, inner)
def vec_in_parabole(atom_pos, height, diameter):
regular = True
inner = True
px = atom_pos[0]
py = atom_pos[1]
pz = atom_pos[2] + height/2.0
a = diameter / sqrt(4 * height)
if pz < 0.0:
return (False, False)
if px == 0.0 and py == 0.0:
return (True, True)
if py == 0.0:
y = 0.0
x = a * a * pz / px
z = x * x / (a * a)
else:
y = pz * py * a * a / (px*px + py*py)
x = y * px / py
z = (x*x + y*y) / (a * a)
if( atom_pos.length > sqrt(x*x+y*y+z*z) ):
regular = False
return (regular, inner)
def vec_in_pyramide_square(atom_pos, size, skin):
"""
Please, if possible, leave all this! The code documents the
mathematical way of cutting a pyramid with a square base.
P1 = Vector((-size/2, 0.0, -size/4))
P2 = Vector((0.0, -size/2, -size/4))
P4 = Vector((size/2, 0.0, -size/4))
P5 = Vector((0.0, size/2, -size/4))
P6 = Vector((0.0, 0.0, size/4))
# First face
v11 = P1 - P2
v12 = P1 - P6
n1 = v11.cross(v12)
g1 = -n1 * P1
# Second face
v21 = P6 - P4
v22 = P6 - P5
n2 = v21.cross(v22)
g2 = -n2 * P6
# Third face
v31 = P1 - P5
v32 = P1 - P6
n3 = v32.cross(v31)
g3 = -n3 * P1
# Fourth face
v41 = P6 - P2
v42 = P2 - P4
n4 = v41.cross(v42)
g4 = -n4 * P2
# Fifth face, base
v51 = P2 - P1
v52 = P2 - P4
n5 = v51.cross(v52)
g5 = -n5 * P2
"""
# A much faster way for calculation:
size2 = size * size
size3 = size2 * size
n1 = Vector((-1/4, -1/4, 1/4)) * size2
g1 = -1/16 * size3
n2 = Vector(( 1/4, 1/4, 1/4)) * size2
g2 = g1
n3 = Vector((-1/4, 1/4, 1/4)) * size2
g3 = g1
n4 = Vector(( 1/4, -1/4, 1/4)) * size2
g4 = g1
n5 = Vector(( 0.0, 0.0, -1/2)) * size2
g5 = -1/8 * size3
distance_plane_1 = abs((n1 * atom_pos - g1)/n1.length)
on_plane_1 = (atom_pos - n1 * (distance_plane_1/n1.length)).length
distance_plane_2 = abs((n2 * atom_pos - g2)/n2.length)
on_plane_2 = (atom_pos - n2 * (distance_plane_2/n2.length)).length
distance_plane_3 = abs((n3 * atom_pos - g3)/n3.length)
on_plane_3 = (atom_pos - n3 * (distance_plane_3/n3.length)).length
distance_plane_4 = abs((n4 * atom_pos - g4)/n4.length)
on_plane_4 = (atom_pos - n4 * (distance_plane_4/n4.length)).length
distance_plane_5 = abs((n5 * atom_pos - g5)/n5.length)
on_plane_5 = (atom_pos - n5 * (distance_plane_5/n5.length)).length
regular = True
inner = True
if(atom_pos.length > on_plane_1):
regular = False
if(atom_pos.length > on_plane_2):
regular = False
if(atom_pos.length > on_plane_3):
regular = False
if(atom_pos.length > on_plane_4):
regular = False
if(atom_pos.length > on_plane_5):
regular = False
if skin == 1.0:
return (regular, inner)
size = size * (1.0 - skin)
size2 = size * size
size3 = size2 * size
n1 = Vector((-1/4, -1/4, 1/4)) * size2
g1 = -1/16 * size3
n2 = Vector(( 1/4, 1/4, 1/4)) * size2
g2 = g1
n3 = Vector((-1/4, 1/4, 1/4)) * size2
g3 = g1
n4 = Vector(( 1/4, -1/4, 1/4)) * size2
g4 = g1
n5 = Vector(( 0.0, 0.0, -1/2)) * size2
g5 = -1/8 * size3
distance_plane_1 = abs((n1 * atom_pos - g1)/n1.length)
on_plane_1 = (atom_pos - n1 * (distance_plane_1/n1.length)).length
distance_plane_2 = abs((n2 * atom_pos - g2)/n2.length)
on_plane_2 = (atom_pos - n2 * (distance_plane_2/n2.length)).length
distance_plane_3 = abs((n3 * atom_pos - g3)/n3.length)
on_plane_3 = (atom_pos - n3 * (distance_plane_3/n3.length)).length
distance_plane_4 = abs((n4 * atom_pos - g4)/n4.length)
on_plane_4 = (atom_pos - n4 * (distance_plane_4/n4.length)).length
distance_plane_5 = abs((n5 * atom_pos - g5)/n5.length)
on_plane_5 = (atom_pos - n5 * (distance_plane_5/n5.length)).length
inner = False
if(atom_pos.length > on_plane_1):
inner = True
if(atom_pos.length > on_plane_2):
inner = True
if(atom_pos.length > on_plane_3):
inner = True
if(atom_pos.length > on_plane_4):
inner = True
if(atom_pos.length > on_plane_5):
inner = True
return (regular, inner)
def vec_in_pyramide_hex_abc(atom_pos, size, skin):
a = size/2.0
#c = size/2.0*cos((30/360)*2.0*pi)
c = size * 0.4330127020
#s = size/2.0*sin((30/360)*2.0*pi)
s = size * 0.25
#h = 2.0 * (sqrt(6.0)/3.0) * c
h = 1.632993162 * c
"""
Please, if possible, leave all this! The code documents the
mathematical way of cutting a tetrahedron.
P1 = Vector((0.0, a, 0.0))
P2 = Vector(( -c, -s, 0.0))
P3 = Vector(( c, -s, 0.0))
P4 = Vector((0.0, 0.0, h))
C = (P1+P2+P3+P4)/4.0
P1 = P1 - C
P2 = P2 - C
P3 = P3 - C
P4 = P4 - C
# First face
v11 = P1 - P2
v12 = P1 - P4
n1 = v11.cross(v12)
g1 = -n1 * P1
# Second face
v21 = P2 - P3
v22 = P2 - P4
n2 = v21.cross(v22)
g2 = -n2 * P2
# Third face
v31 = P3 - P1
v32 = P3 - P4
n3 = v31.cross(v32)
g3 = -n3 * P3
# Fourth face
v41 = P2 - P1
v42 = P2 - P3
n4 = v41.cross(v42)
g4 = -n4 * P1
"""
n1 = Vector(( -h*(a+s), c*h, c*a ))
g1 = -1/2*c*(a*h+s*h)
n2 = Vector(( 0, -2*c*h, 2*c*s ))
g2 = -1/2*c*(a*h+s*h)
n3 = Vector(( h*(a+s), c*h, a*c ))
g3 = -1/2*c*(a*h+s*h)
n4 = Vector(( 0, 0, -2*c*(s+a) ))
g4 = -1/2*h*c*(s+a)
distance_plane_1 = abs((n1 * atom_pos - g1)/n1.length)
on_plane_1 = (atom_pos - n1 * (distance_plane_1/n1.length)).length
distance_plane_2 = abs((n2 * atom_pos - g2)/n2.length)
on_plane_2 = (atom_pos - n2 * (distance_plane_2/n2.length)).length
distance_plane_3 = abs((n3 * atom_pos - g3)/n3.length)
on_plane_3 = (atom_pos - n3 * (distance_plane_3/n3.length)).length
distance_plane_4 = abs((n4 * atom_pos - g4)/n4.length)
on_plane_4 = (atom_pos - n4 * (distance_plane_4/n4.length)).length
regular = True
inner = True
if(atom_pos.length > on_plane_1):
regular = False
if(atom_pos.length > on_plane_2):
regular = False
if(atom_pos.length > on_plane_3):
regular = False
if(atom_pos.length > on_plane_4):
regular = False
if skin == 1.0:
return (regular, inner)
size = size * (1.0 - skin)
a = size/2.0
#c = size/2.0*cos((30/360)*2.0*pi)
c= size * 0.4330127020
#s = size/2.0*sin((30/360)*2.0*pi)
s = size * 0.25
#h = 2.0 * (sqrt(6.0)/3.0) * c
h = 1.632993162 * c
n1 = Vector(( -h*(a+s), c*h, c*a ))
g1 = -1/2*c*(a*h+s*h)
n2 = Vector(( 0, -2*c*h, 2*c*s ))
g2 = -1/2*c*(a*h+s*h)
n3 = Vector(( h*(a+s), c*h, a*c ))
g3 = -1/2*c*(a*h+s*h)
n4 = Vector(( 0, 0, -2*c*(s+a) ))
g4 = -1/2*h*c*(s+a)
distance_plane_1 = abs((n1 * atom_pos - g1)/n1.length)
on_plane_1 = (atom_pos - n1 * (distance_plane_1/n1.length)).length
distance_plane_2 = abs((n2 * atom_pos - g2)/n2.length)
on_plane_2 = (atom_pos - n2 * (distance_plane_2/n2.length)).length
distance_plane_3 = abs((n3 * atom_pos - g3)/n3.length)
on_plane_3 = (atom_pos - n3 * (distance_plane_3/n3.length)).length
distance_plane_4 = abs((n4 * atom_pos - g4)/n4.length)
on_plane_4 = (atom_pos - n4 * (distance_plane_4/n4.length)).length
inner = False
if(atom_pos.length > on_plane_1):
inner = True
if(atom_pos.length > on_plane_2):
inner = True
if(atom_pos.length > on_plane_3):
inner = True
if(atom_pos.length > on_plane_4):
inner = True
return (regular, inner)
def vec_in_octahedron(atom_pos,size, skin):
regular = True
inner = True
"""
Please, if possible, leave all this! The code documents the
mathematical way of cutting an octahedron.
P1 = Vector((-size/2, 0.0, 0.0))
P2 = Vector((0.0, -size/2, 0.0))
P3 = Vector((0.0, 0.0, -size/2))
P4 = Vector((size/2, 0.0, 0.0))
P5 = Vector((0.0, size/2, 0.0))
P6 = Vector((0.0, 0.0, size/2))
# First face
v11 = P2 - P1
v12 = P2 - P3
n1 = v11.cross(v12)
g1 = -n1 * P2
# Second face
v21 = P1 - P5
v22 = P1 - P3
n2 = v21.cross(v22)
g2 = -n2 * P1
# Third face
v31 = P1 - P2
v32 = P1 - P6
n3 = v31.cross(v32)
g3 = -n3 * P1
# Fourth face
v41 = P6 - P2
v42 = P2 - P4
n4 = v41.cross(v42)
g4 = -n4 * P2
# Fifth face
v51 = P2 - P3
v52 = P2 - P4
n5 = v51.cross(v52)
g5 = -n5 * P2
# Sixth face
v61 = P6 - P4
v62 = P6 - P5
n6 = v61.cross(v62)
g6 = -n6 * P6
# Seventh face
v71 = P5 - P4
v72 = P5 - P3
n7 = v71.cross(v72)
g7 = -n7 * P5
# Eighth face
v81 = P1 - P5
v82 = P1 - P6
n8 = v82.cross(v81)
g8 = -n8 * P1
"""
# A much faster way for calculation:
size2 = size * size
size3 = size2 * size
n1 = Vector((-1/4, -1/4, -1/4)) * size2
g1 = -1/8 * size3
n2 = Vector((-1/4, 1/4, -1/4)) * size2
g2 = g1
n3 = Vector((-1/4, -1/4, 1/4)) * size2
g3 = g1
n4 = Vector(( 1/4, -1/4, 1/4)) * size2
g4 = g1
n5 = Vector(( 1/4, -1/4, -1/4)) * size2
g5 = g1
n6 = Vector(( 1/4, 1/4, 1/4)) * size2
g6 = g1
n7 = Vector(( 1/4, 1/4, -1/4)) * size2
g7 = g1
n8 = Vector((-1/4, 1/4, 1/4)) * size2
g8 = g1
distance_plane_1 = abs((n1 * atom_pos - g1)/n1.length)
on_plane_1 = (atom_pos - n1 * (distance_plane_1/n1.length)).length
distance_plane_2 = abs((n2 * atom_pos - g2)/n2.length)
on_plane_2 = (atom_pos - n2 * (distance_plane_2/n2.length)).length
distance_plane_3 = abs((n3 * atom_pos - g3)/n3.length)
on_plane_3 = (atom_pos - n3 * (distance_plane_3/n3.length)).length
distance_plane_4 = abs((n4 * atom_pos - g4)/n4.length)
on_plane_4 = (atom_pos - n4 * (distance_plane_4/n4.length)).length
distance_plane_5 = abs((n5 * atom_pos - g5)/n5.length)
on_plane_5 = (atom_pos - n5 * (distance_plane_5/n5.length)).length
distance_plane_6 = abs((n6 * atom_pos - g6)/n6.length)
on_plane_6 = (atom_pos - n6 * (distance_plane_6/n6.length)).length
distance_plane_7 = abs((n7 * atom_pos - g7)/n7.length)
on_plane_7 = (atom_pos - n7 * (distance_plane_7/n7.length)).length
distance_plane_8 = abs((n8 * atom_pos - g8)/n8.length)
on_plane_8 = (atom_pos - n8 * (distance_plane_8/n8.length)).length
if(atom_pos.length > on_plane_1):
regular = False
if(atom_pos.length > on_plane_2):
regular = False
if(atom_pos.length > on_plane_3):
regular = False
if(atom_pos.length > on_plane_4):
regular = False
if(atom_pos.length > on_plane_5):
regular = False
if(atom_pos.length > on_plane_6):
regular = False
if(atom_pos.length > on_plane_7):
regular = False
if(atom_pos.length > on_plane_8):
regular = False
if skin == 1.0:
return (regular, inner)
size = size * (1.0 - skin)
size2 = size * size
size3 = size2 * size
n1 = Vector((-1/4, -1/4, -1/4)) * size2
g1 = -1/8 * size3
n2 = Vector((-1/4, 1/4, -1/4)) * size2
g2 = g1
n3 = Vector((-1/4, -1/4, 1/4)) * size2
g3 = g1
n4 = Vector(( 1/4, -1/4, 1/4)) * size2
g4 = g1
n5 = Vector(( 1/4, -1/4, -1/4)) * size2
g5 = g1
n6 = Vector(( 1/4, 1/4, 1/4)) * size2
g6 = g1
n7 = Vector(( 1/4, 1/4, -1/4)) * size2
g7 = g1
n8 = Vector((-1/4, 1/4, 1/4)) * size2
g8 = g1
distance_plane_1 = abs((n1 * atom_pos - g1)/n1.length)
on_plane_1 = (atom_pos - n1 * (distance_plane_1/n1.length)).length
distance_plane_2 = abs((n2 * atom_pos - g2)/n2.length)
on_plane_2 = (atom_pos - n2 * (distance_plane_2/n2.length)).length
distance_plane_3 = abs((n3 * atom_pos - g3)/n3.length)
on_plane_3 = (atom_pos - n3 * (distance_plane_3/n3.length)).length
distance_plane_4 = abs((n4 * atom_pos - g4)/n4.length)
on_plane_4 = (atom_pos - n4 * (distance_plane_4/n4.length)).length
distance_plane_5 = abs((n5 * atom_pos - g5)/n5.length)
on_plane_5 = (atom_pos - n5 * (distance_plane_5/n5.length)).length
distance_plane_6 = abs((n6 * atom_pos - g6)/n6.length)
on_plane_6 = (atom_pos - n6 * (distance_plane_6/n6.length)).length
distance_plane_7 = abs((n7 * atom_pos - g7)/n7.length)
on_plane_7 = (atom_pos - n7 * (distance_plane_7/n7.length)).length
distance_plane_8 = abs((n8 * atom_pos - g8)/n8.length)
on_plane_8 = (atom_pos - n8 * (distance_plane_8/n8.length)).length
inner = False
if(atom_pos.length > on_plane_1):
inner = True
if(atom_pos.length > on_plane_2):
inner = True
if(atom_pos.length > on_plane_3):
inner = True
if(atom_pos.length > on_plane_4):
inner = True
if(atom_pos.length > on_plane_5):
inner = True
if(atom_pos.length > on_plane_6):
inner = True
if(atom_pos.length > on_plane_7):
inner = True
if(atom_pos.length > on_plane_8):
inner = True
return (regular, inner)
def vec_in_truncated_octahedron(atom_pos,size, skin):
regular = True
inner = True
# The normal octahedron
size2 = size * size
size3 = size2 * size
n1 = Vector((-1/4, -1/4, -1/4)) * size2
g1 = -1/8 * size3
n2 = Vector((-1/4, 1/4, -1/4)) * size2
g2 = g1
n3 = Vector((-1/4, -1/4, 1/4)) * size2
g3 = g1
n4 = Vector(( 1/4, -1/4, 1/4)) * size2
g4 = g1
n5 = Vector(( 1/4, -1/4, -1/4)) * size2
g5 = g1
n6 = Vector(( 1/4, 1/4, 1/4)) * size2
g6 = g1
n7 = Vector(( 1/4, 1/4, -1/4)) * size2
g7 = g1
n8 = Vector((-1/4, 1/4, 1/4)) * size2
g8 = g1
distance_plane_1 = abs((n1 * atom_pos - g1)/n1.length)
on_plane_1 = (atom_pos - n1 * (distance_plane_1/n1.length)).length
distance_plane_2 = abs((n2 * atom_pos - g2)/n2.length)
on_plane_2 = (atom_pos - n2 * (distance_plane_2/n2.length)).length
distance_plane_3 = abs((n3 * atom_pos - g3)/n3.length)
on_plane_3 = (atom_pos - n3 * (distance_plane_3/n3.length)).length
distance_plane_4 = abs((n4 * atom_pos - g4)/n4.length)
on_plane_4 = (atom_pos - n4 * (distance_plane_4/n4.length)).length
distance_plane_5 = abs((n5 * atom_pos - g5)/n5.length)
on_plane_5 = (atom_pos - n5 * (distance_plane_5/n5.length)).length
distance_plane_6 = abs((n6 * atom_pos - g6)/n6.length)
on_plane_6 = (atom_pos - n6 * (distance_plane_6/n6.length)).length
distance_plane_7 = abs((n7 * atom_pos - g7)/n7.length)
on_plane_7 = (atom_pos - n7 * (distance_plane_7/n7.length)).length
distance_plane_8 = abs((n8 * atom_pos - g8)/n8.length)
on_plane_8 = (atom_pos - n8 * (distance_plane_8/n8.length)).length
# Here are the 6 additional faces
# pp = (size/2.0) - (sqrt(2.0)/2.0) * ((size/sqrt(2.0))/3.0)
pp = size / 3.0
n_1 = Vector((1.0,0.0,0.0))
n_2 = Vector((-1.0,0.0,0.0))
n_3 = Vector((0.0,1.0,0.0))
n_4 = Vector((0.0,-1.0,0.0))
n_5 = Vector((0.0,0.0,1.0))
n_6 = Vector((0.0,0.0,-1.0))
distance_plane_1b = abs((n_1 * atom_pos + pp)/n_1.length)
on_plane_1b = (atom_pos - n_1 * (distance_plane_1b/n_1.length)).length
distance_plane_2b = abs((n_2 * atom_pos + pp)/n_2.length)
on_plane_2b = (atom_pos - n_2 * (distance_plane_2b/n_2.length)).length
distance_plane_3b = abs((n_3 * atom_pos + pp)/n_3.length)
on_plane_3b = (atom_pos - n_3 * (distance_plane_3b/n_3.length)).length
distance_plane_4b = abs((n_4 * atom_pos + pp)/n_4.length)
on_plane_4b = (atom_pos - n_4 * (distance_plane_4b/n_4.length)).length
distance_plane_5b = abs((n_5 * atom_pos + pp)/n_5.length)
on_plane_5b = (atom_pos - n_5 * (distance_plane_5b/n_5.length)).length
distance_plane_6b = abs((n_6 * atom_pos + pp)/n_6.length)
on_plane_6b = (atom_pos - n_6 * (distance_plane_6b/n_6.length)).length
if(atom_pos.length > on_plane_1):
regular = False
if(atom_pos.length > on_plane_2):
regular = False
if(atom_pos.length > on_plane_3):
regular = False
if(atom_pos.length > on_plane_4):
regular = False
if(atom_pos.length > on_plane_5):
regular = False
if(atom_pos.length > on_plane_6):
regular = False
if(atom_pos.length > on_plane_7):
regular = False
if(atom_pos.length > on_plane_8):
regular = False
if(atom_pos.length > on_plane_1b):
regular = False
if(atom_pos.length > on_plane_2b):
regular = False
if(atom_pos.length > on_plane_3b):
regular = False
if(atom_pos.length > on_plane_4b):
regular = False
if(atom_pos.length > on_plane_5b):
regular = False
if(atom_pos.length > on_plane_6b):
regular = False
if skin == 1.0:
return (regular, inner)
size = size * (1.0 - skin)
# The normal octahedron
size2 = size * size
size3 = size2 * size
n1 = Vector((-1/4, -1/4, -1/4)) * size2
g1 = -1/8 * size3
n2 = Vector((-1/4, 1/4, -1/4)) * size2
g2 = g1
n3 = Vector((-1/4, -1/4, 1/4)) * size2
g3 = g1
n4 = Vector(( 1/4, -1/4, 1/4)) * size2
g4 = g1
n5 = Vector(( 1/4, -1/4, -1/4)) * size2
g5 = g1
n6 = Vector(( 1/4, 1/4, 1/4)) * size2
g6 = g1
n7 = Vector(( 1/4, 1/4, -1/4)) * size2
g7 = g1
n8 = Vector((-1/4, 1/4, 1/4)) * size2
g8 = g1
distance_plane_1 = abs((n1 * atom_pos - g1)/n1.length)
on_plane_1 = (atom_pos - n1 * (distance_plane_1/n1.length)).length
distance_plane_2 = abs((n2 * atom_pos - g2)/n2.length)
on_plane_2 = (atom_pos - n2 * (distance_plane_2/n2.length)).length
distance_plane_3 = abs((n3 * atom_pos - g3)/n3.length)
on_plane_3 = (atom_pos - n3 * (distance_plane_3/n3.length)).length
distance_plane_4 = abs((n4 * atom_pos - g4)/n4.length)
on_plane_4 = (atom_pos - n4 * (distance_plane_4/n4.length)).length
distance_plane_5 = abs((n5 * atom_pos - g5)/n5.length)
on_plane_5 = (atom_pos - n5 * (distance_plane_5/n5.length)).length
distance_plane_6 = abs((n6 * atom_pos - g6)/n6.length)
on_plane_6 = (atom_pos - n6 * (distance_plane_6/n6.length)).length
distance_plane_7 = abs((n7 * atom_pos - g7)/n7.length)
on_plane_7 = (atom_pos - n7 * (distance_plane_7/n7.length)).length
distance_plane_8 = abs((n8 * atom_pos - g8)/n8.length)
on_plane_8 = (atom_pos - n8 * (distance_plane_8/n8.length)).length
# Here are the 6 additional faces
# pp = (size/2.0) - (sqrt(2.0)/2.0) * ((size/sqrt(2.0))/3.0)
pp = size / 3.0
n_1 = Vector((1.0,0.0,0.0))
n_2 = Vector((-1.0,0.0,0.0))
n_3 = Vector((0.0,1.0,0.0))
n_4 = Vector((0.0,-1.0,0.0))
n_5 = Vector((0.0,0.0,1.0))
n_6 = Vector((0.0,0.0,-1.0))
distance_plane_1b = abs((n_1 * atom_pos + pp)/n_1.length)
on_plane_1b = (atom_pos - n_1 * (distance_plane_1b/n_1.length)).length
distance_plane_2b = abs((n_2 * atom_pos + pp)/n_2.length)
on_plane_2b = (atom_pos - n_2 * (distance_plane_2b/n_2.length)).length
distance_plane_3b = abs((n_3 * atom_pos + pp)/n_3.length)
on_plane_3b = (atom_pos - n_3 * (distance_plane_3b/n_3.length)).length
distance_plane_4b = abs((n_4 * atom_pos + pp)/n_4.length)
on_plane_4b = (atom_pos - n_4 * (distance_plane_4b/n_4.length)).length
distance_plane_5b = abs((n_5 * atom_pos + pp)/n_5.length)
on_plane_5b = (atom_pos - n_5 * (distance_plane_5b/n_5.length)).length
distance_plane_6b = abs((n_6 * atom_pos + pp)/n_6.length)
on_plane_6b = (atom_pos - n_6 * (distance_plane_6b/n_6.length)).length
inner = False
if(atom_pos.length > on_plane_1):
inner = True
if(atom_pos.length > on_plane_2):
inner = True
if(atom_pos.length > on_plane_3):
inner = True
if(atom_pos.length > on_plane_4):
inner = True
if(atom_pos.length > on_plane_5):
inner = True
if(atom_pos.length > on_plane_6):
inner = True
if(atom_pos.length > on_plane_7):
inner = True
if(atom_pos.length > on_plane_8):
inner = True
if(atom_pos.length > on_plane_1b):
inner = True
if(atom_pos.length > on_plane_2b):
inner = True
if(atom_pos.length > on_plane_3b):
inner = True
if(atom_pos.length > on_plane_4b):
inner = True
if(atom_pos.length > on_plane_5b):
inner = True
if(atom_pos.length > on_plane_6b):
inner = True
return (regular, inner)
# -----------------------------------------------------------------------------
# Routines for lattices
def create_hexagonal_abcabc_lattice(ctype, size, skin, lattice):
atom_number_total = 0
atom_number_drawn = 0
y_displ = 0
z_displ = 0
"""
e = (1/sqrt(2.0)) * lattice
f = sqrt(3.0/4.0) * e
df1 = (e/2.0) * tan((30.0/360.0)*2.0*pi)
df2 = (e/2.0) / cos((30.0/360.0)*2.0*pi)
g = sqrt(2.0/3.0) * e
"""
e = 0.7071067810 * lattice
f = 0.8660254038 * e
df1 = 0.2886751348 * e
df2 = 0.5773502690 * e
g = 0.8164965810 * e
if ctype == "parabolid_abc":
# size = height, skin = diameter
number_x = int(skin/(2*e))+4
number_y = int(skin/(2*f))+4
number_z = int(size/(2*g))
else:
number_x = int(size/(2*e))+4
number_y = int(size/(2*f))+4
number_z = int(size/(2*g))+1+4
for k in range(-number_z,number_z+1):
for j in range(-number_y,number_y+1):
for i in range(-number_x,number_x+1):
atom = Vector((float(i)*e,float(j)*f,float(k)*g))
if y_displ == 1:
if z_displ == 1:
atom[0] += e/2.0
else:
atom[0] -= e/2.0
if z_displ == 1:
atom[0] -= e/2.0
atom[1] += df1
if z_displ == 2:
atom[0] += 0.0
atom[1] += df2
if ctype == "sphere_hex_abc":
message = vec_in_sphere(atom, size, skin)
elif ctype == "pyramide_hex_abc":
# size = height, skin = diameter
message = vec_in_pyramide_hex_abc(atom, size, skin)
elif ctype == "parabolid_abc":
message = vec_in_parabole(atom, size, skin)
if message[0] == True and message[1] == True:
atom_add = CLASS_atom_cluster_atom(atom)
ATOM_CLUSTER_ALL_ATOMS.append(atom_add)
atom_number_total += 1
atom_number_drawn += 1
if message[0] == True and message[1] == False:
atom_number_total += 1
if y_displ == 1:
y_displ = 0
else:
y_displ = 1
y_displ = 0
if z_displ == 0:
z_displ = 1
elif z_displ == 1:
z_displ = 2
else:
z_displ = 0
print("Atom positions calculated")
return (atom_number_total, atom_number_drawn)
def create_hexagonal_abab_lattice(ctype, size, skin, lattice):
atom_number_total = 0
atom_number_drawn = 0
y_displ = "even"
z_displ = "even"
"""
e = (1/sqrt(2.0)) * lattice
f = sqrt(3.0/4.0) * e
df = (e/2.0) * tan((30.0/360.0)*2*pi)
g = sqrt(2.0/3.0) * e
"""
e = 0.7071067814 * lattice
f = 0.8660254038 * e
df = 0.2886751348 * e
g = 0.8164965810 * e
if ctype == "parabolid_ab":
# size = height, skin = diameter
number_x = int(skin/(2*e))+4
number_y = int(skin/(2*f))+4
number_z = int(size/(2*g))
else:
number_x = int(size/(2*e))+4
number_y = int(size/(2*f))+4
number_z = int(size/(2*g))+1+4
for k in range(-number_z,number_z+1):
for j in range(-number_y,number_y+1):
for i in range(-number_x,number_x+1):
atom = Vector((float(i)*e,float(j)*f,float(k)*g))
if "odd" in y_displ:
if "odd" in z_displ:
atom[0] += e/2.0
else:
atom[0] -= e/2.0
if "odd" in z_displ:
atom[0] -= e/2.0
atom[1] += df
if ctype == "sphere_hex_ab":
message = vec_in_sphere(atom, size, skin)
elif ctype == "parabolid_ab":
# size = height, skin = diameter
message = vec_in_parabole(atom, size, skin)
if message[0] == True and message[1] == True:
atom_add = CLASS_atom_cluster_atom(atom)
ATOM_CLUSTER_ALL_ATOMS.append(atom_add)
atom_number_total += 1
atom_number_drawn += 1
if message[0] == True and message[1] == False:
atom_number_total += 1
if "even" in y_displ:
y_displ = "odd"
else:
y_displ = "even"
y_displ = "even"
if "even" in z_displ:
z_displ = "odd"
else:
z_displ = "even"
print("Atom positions calculated")
return (atom_number_total, atom_number_drawn)
def create_square_lattice(ctype, size, skin, lattice):
atom_number_total = 0
atom_number_drawn = 0
if ctype == "parabolid_square":
# size = height, skin = diameter
number_k = int(size/(2.0*lattice))
number_j = int(skin/(2.0*lattice)) + 5
number_i = int(skin/(2.0*lattice)) + 5
else:
number_k = int(size/(2.0*lattice))
number_j = int(size/(2.0*lattice))
number_i = int(size/(2.0*lattice))
for k in range(-number_k,number_k+1):
for j in range(-number_j,number_j+1):
for i in range(-number_i,number_i+1):
atom = Vector((float(i),float(j),float(k))) * lattice
if ctype == "sphere_square":
message = vec_in_sphere(atom, size, skin)
elif ctype == "pyramide_square":
message = vec_in_pyramide_square(atom, size, skin)
elif ctype == "parabolid_square":
# size = height, skin = diameter
message = vec_in_parabole(atom, size, skin)
elif ctype == "octahedron":
message = vec_in_octahedron(atom, size, skin)
elif ctype == "truncated_octahedron":
message = vec_in_truncated_octahedron(atom,size, skin)
if message[0] == True and message[1] == True:
atom_add = CLASS_atom_cluster_atom(atom)
ATOM_CLUSTER_ALL_ATOMS.append(atom_add)
atom_number_total += 1
atom_number_drawn += 1
if message[0] == True and message[1] == False:
atom_number_total += 1
print("Atom positions calculated")
return (atom_number_total, atom_number_drawn)
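# A small usage sketch (an assumption for illustration, not part of the original
# add-on): the lattice builders above fill the module-level ATOM_CLUSTER_ALL_ATOMS
# list and return (total, drawn) atom counts. Running this requires Blender's
# mathutils module, which the imports at the top of the file rely on; the size,
# skin and lattice values below are arbitrary example numbers.
def _example_build_octahedron(size=20.0, skin=1.0, lattice=4.0):
    # Start from an empty atom list, as the operator code would do.
    del ATOM_CLUSTER_ALL_ATOMS[:]
    # "octahedron" is one of the ctype strings handled by create_square_lattice;
    # skin == 1.0 keeps the cluster solid (no hollow shell), and lattice is the
    # spacing between neighbouring grid points.
    total, drawn = create_square_lattice("octahedron", size, skin, lattice)
    positions = [atom.location for atom in ATOM_CLUSTER_ALL_ATOMS]
    return total, drawn, positions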
# -----------------------------------------------------------------------------
# Routine for the icosahedron
# Note that the icosahedron needs special treatment since it requires an
# uncommon crystal lattice. The faces are (111) facets and the geometry
# is five-fold. So far, a maximum size of 8217 atoms can be chosen.
# More details about icosahedron-shaped clusters can be found in:
#
# 1. C. Mottet, G. Tréglia, B. Legrand, Surface Science 383 (1997) L719-L727
# 2. C. R. Henry, Surface Science Reports 31 (1998) 231-325
# The following code is a translation from an existing Fortran code into Python.
# The Fortran code has been created by Christine Mottet and translated by me
# (Clemens Barth).
# Although a couple of code lines are non-typical for Python, it is best to
# leave the code as is.
#
# To do:
#
# 1. Unlimited cluster size
# 2. Skin effect
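# A short illustration (not part of the original code) of the closed-shell atom
# count used in create_icosahedron below: natot = 1 + (10*n**2 + 15*n + 11)*n/3
# for n shells. For n = 13 this gives 8217 atoms, the maximum size mentioned in
# the comment above; n = 1, 2, 3 give the familiar Mackay sizes 13, 55 and 147.
def _example_icosahedron_atom_count(n):
    # Same expression as the natot line in create_icosahedron.
    return int(1 + (10 * n * n + 15 * n + 11) * n / 3)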
def create_icosahedron(size, lattice):
natot = int(1 + (10*size*size+15*size+11)*size/3)
x = list(range(natot+1))
y = list(range(natot+1))
z = list(range(natot+1))
xs = list(range(12+1))
ys = list(range(12+1))
zs = list(range(12+1))
xa = [[[ [] for i in range(12+1)] for j in range(12+1)] for k in range(20+1)]
ya = [[[ [] for i in range(12+1)] for j in range(12+1)] for k in range(20+1)]
za = [[[ [] for i in range(12+1)] for j in range(12+1)] for k in range(20+1)]
naret = [[ [] for i in range(12+1)] for j in range(12+1)]
nfacet = [[[ [] for i in range(12+1)] for j in range(12+1)] for k in range(12+1)]
rac2 = sqrt(2.0)
rac5 = sqrt(5.0)
tdef = (rac5+1.0)/2.0
rapp = sqrt(2.0*(1.0-tdef/(tdef*tdef+1.0)))
nats = 2 * (5*size*size+1)
nat = 13
epsi = 0.01
x[1] = 0.0
y[1] = 0.0
z[1] = 0.0
for i in range(2, 5+1):
z[i] = 0.0
y[i+4] = 0.0
x[i+8] = 0.0
for i in range(2, 3+1):
x[i] = tdef
x[i+2] = -tdef
x[i+4] = 1.0
x[i+6] = -1.0
y[i+8] = tdef
y[i+10] = -tdef
for i in range(2, 4+1, 2):
y[i] = 1.0
y[i+1] = -1.0
z[i+4] = tdef
z[i+5] = -tdef
z[i+8] = 1.0
z[i+9] = -1.0
xdef = rac2 / sqrt(tdef * tdef + 1)
for i in range(2, 13+1):
x[i] = x[i] * xdef / 2.0
y[i] = y[i] * xdef / 2.0
z[i] = z[i] * xdef / 2.0
if size > 1:
for n in range (2, size+1):
ifacet = 0
iaret = 0
inatf = 0
for i in range(1, 12+1):
for j in range(1, 12+1):
naret[i][j] = 0
for k in range (1, 12+1):
nfacet[i][j][k] = 0
nl1 = 6
nl2 = 8
nl3 = 9
k1 = 0
k2 = 0
k3 = 0
k12 = 0
for i in range(1, 12+1):
nat += 1
xs[i] = n * x[i+1]
ys[i] = n * y[i+1]
zs[i] = n * z[i+1]
x[nat] = xs[i]
y[nat] = ys[i]
z[nat] = zs[i]
k1 += 1
for i in range(1, 12+1):
for j in range(2, 12+1):
if j <= i:
continue
xij = xs[j] - xs[i]
yij = ys[j] - ys[i]
zij = zs[j] - zs[i]
xij2 = xij * xij
yij2 = yij * yij
zij2 = zij * zij
dij2 = xij2 + yij2 + zij2
dssn = n * rapp / rac2
dssn2 = dssn * dssn
diffij = abs(dij2-dssn2)
if diffij >= epsi:
continue
for k in range(3, 12+1):
if k <= j:
continue
xjk = xs[k] - xs[j]
yjk = ys[k] - ys[j]
zjk = zs[k] - zs[j]
xjk2 = xjk * xjk
yjk2 = yjk * yjk
zjk2 = zjk * zjk
djk2 = xjk2 + yjk2 + zjk2
diffjk = abs(djk2-dssn2)
if diffjk >= epsi:
continue
xik = xs[k] - xs[i]
yik = ys[k] - ys[i]
zik = zs[k] - zs[i]
xik2 = xik * xik
yik2 = yik * yik
zik2 = zik * zik
dik2 = xik2 + yik2 + zik2
diffik = abs(dik2-dssn2)
if diffik >= epsi:
continue
if nfacet[i][j][k] != 0:
continue
ifacet += 1
nfacet[i][j][k] = ifacet
if naret[i][j] == 0:
iaret += 1
naret[i][j] = iaret
for l in range(1,n-1+1):
nat += 1
xa[i][j][l] = xs[i]+l*(xs[j]-xs[i]) / n
ya[i][j][l] = ys[i]+l*(ys[j]-ys[i]) / n
za[i][j][l] = zs[i]+l*(zs[j]-zs[i]) / n
x[nat] = xa[i][j][l]
y[nat] = ya[i][j][l]
z[nat] = za[i][j][l]
if naret[i][k] == 0:
iaret += 1
naret[i][k] = iaret
for l in range(1, n-1+1):
nat += 1
xa[i][k][l] = xs[i]+l*(xs[k]-xs[i]) / n
ya[i][k][l] = ys[i]+l*(ys[k]-ys[i]) / n
za[i][k][l] = zs[i]+l*(zs[k]-zs[i]) / n
x[nat] = xa[i][k][l]
y[nat] = ya[i][k][l]
z[nat] = za[i][k][l]
if naret[j][k] == 0:
iaret += 1
naret[j][k] = iaret
for l in range(1, n-1+1):
nat += 1
xa[j][k][l] = xs[j]+l*(xs[k]-xs[j]) / n
ya[j][k][l] = ys[j]+l*(ys[k]-ys[j]) / n
za[j][k][l] = zs[j]+l*(zs[k]-zs[j]) / n
x[nat] = xa[j][k][l]
y[nat] = ya[j][k][l]
z[nat] = za[j][k][l]
for l in range(2, n-1+1):
for ll in range(1, l-1+1):
xf = xa[i][j][l]+ll*(xa[i][k][l]-xa[i][j][l]) / l
yf = ya[i][j][l]+ll*(ya[i][k][l]-ya[i][j][l]) / l
zf = za[i][j][l]+ll*(za[i][k][l]-za[i][j][l]) / l
nat += 1
inatf += 1
x[nat] = xf
y[nat] = yf
z[nat] = zf
k3 += 1
atom_number_total = 0
atom_number_drawn = 0
for i in range (1,natot+1):
atom = Vector((x[i],y[i],z[i])) * lattice
atom_add = CLASS_atom_cluster_atom(atom)
ATOM_CLUSTER_ALL_ATOMS.append(atom_add)
atom_number_total += 1
atom_number_drawn += 1
return (atom_number_total, atom_number_drawn)
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/Python/bin/2.78/scripts/addons_contrib/add_mesh_clusters/add_mesh_cluster.py
|
Python
|
gpl-3.0
| 49,648
|
[
"CRYSTAL"
] |
69b948782dfcd39dde0843dacecc9e17b185347d32b371c37b30fe234fd9cfec
|
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""some various utilities and helper classes, most of them used in the
main pylint class
"""
import re
import sys
import tokenize
from warnings import warn
import os
from os.path import dirname, basename, splitext, exists, isdir, join, normpath
from .logilab.common.interface import implements
from .logilab.common.modutils import modpath_from_file, get_module_files, \
file_from_modpath, load_module_from_file
from .logilab.common.textutils import normalize_text
from .logilab.common.configuration import rest_format_section
from .logilab.common.ureports import Section
from .astroid import nodes, Module
from .interfaces import IRawChecker, ITokenChecker
class UnknownMessage(Exception):
"""raised when a unregistered message id is encountered"""
class EmptyReport(Exception):
"""raised when a report is empty and so should not be displayed"""
MSG_TYPES = {
'I' : 'info',
'C' : 'convention',
'R' : 'refactor',
'W' : 'warning',
'E' : 'error',
'F' : 'fatal'
}
MSG_TYPES_LONG = dict([(v, k) for k, v in MSG_TYPES.iteritems()])
MSG_TYPES_STATUS = {
'I' : 0,
'C' : 16,
'R' : 8,
'W' : 4,
'E' : 2,
'F' : 1
}
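# These status values are bit flags: add_message() below ORs them into
# self.msg_status, so the final value encodes every message category that was
# emitted. A minimal illustration (not part of the original module):
#
#     status = 0
#     for category in ('E', 'W'):        # one error and one warning were seen
#         status |= MSG_TYPES_STATUS[category]
#     # status is now 6 (2 | 4): both the error and the warning bits are set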
_MSG_ORDER = 'EWRCIF'
MSG_STATE_SCOPE_CONFIG = 0
MSG_STATE_SCOPE_MODULE = 1
OPTION_RGX = re.compile(r'\s*#.*\bpylint:(.*)')
# The line/node distinction does not apply to fatal errors and reports.
_SCOPE_EXEMPT = 'FR'
class WarningScope(object):
LINE = 'line-based-msg'
NODE = 'node-based-msg'
def sort_msgs(msgids):
"""sort message identifiers according to their category first"""
msgs = {}
for msg in msgids:
msgs.setdefault(msg[0], []).append(msg)
result = []
for m_id in _MSG_ORDER:
if m_id in msgs:
result.extend( sorted(msgs[m_id]) )
return result
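# For illustration (not part of the original module): with _MSG_ORDER = 'EWRCIF',
# sort_msgs(['C0103', 'W0611', 'E0001']) returns ['E0001', 'W0611', 'C0103'];
# identifiers are grouped by category in that order and sorted within each group.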
def get_module_and_frameid(node):
"""return the module name and the frame id in the module"""
frame = node.frame()
module, obj = '', []
while frame:
if isinstance(frame, Module):
module = frame.name
else:
obj.append(getattr(frame, 'name', '<lambda>'))
try:
frame = frame.parent.frame()
except AttributeError:
frame = None
obj.reverse()
return module, '.'.join(obj)
def category_id(id):
id = id.upper()
if id in MSG_TYPES:
return id
return MSG_TYPES_LONG.get(id)
def tokenize_module(module):
stream = module.file_stream
stream.seek(0)
readline = stream.readline
if sys.version_info < (3, 0):
if module.file_encoding is not None:
readline = lambda: stream.readline().decode(module.file_encoding,
'replace')
return list(tokenize.generate_tokens(readline))
return list(tokenize.tokenize(readline))
class MessageDefinition(object):
def __init__(self, checker, msgid, msg, descr, symbol, scope):
assert len(msgid) == 5, 'Invalid message id %s' % msgid
assert msgid[0] in MSG_TYPES, \
'Bad message type %s in %r' % (msgid[0], msgid)
self.msgid = msgid
self.msg = msg
self.descr = descr
self.checker = checker
self.symbol = symbol
self.scope = scope
class MessagesHandlerMixIn(object):
"""a mix-in class containing all the messages related methods for the main
lint class
"""
def __init__(self):
# dictionary of registered messages
self._messages = {}
# dictionary from string symbolic id to Message object.
self._messages_by_symbol = {}
self._msgs_state = {}
self._module_msgs_state = {} # None
self._raw_module_msgs_state = {}
self._msgs_by_category = {}
self.msg_status = 0
self._ignored_msgs = {}
self._suppression_mapping = {}
def register_messages(self, checker):
"""register a dictionary of messages
Keys are message ids, values are a 2-uple with the message type and the
message itself
message ids should be a string of len 4, where the two first characters
are the checker id and the two last the message id in this checker
"""
msgs_dict = checker.msgs
chkid = None
for msgid, msg_tuple in msgs_dict.iteritems():
if implements(checker, (IRawChecker, ITokenChecker)):
scope = WarningScope.LINE
else:
scope = WarningScope.NODE
if len(msg_tuple) > 2:
(msg, msgsymbol, msgdescr) = msg_tuple[:3]
assert msgsymbol not in self._messages_by_symbol, \
'Message symbol %r is already defined' % msgsymbol
if len(msg_tuple) > 3:
if 'scope' in msg_tuple[3]:
scope = msg_tuple[3]['scope']
if 'minversion' in msg_tuple[3]:
minversion = msg_tuple[3]['minversion']
if minversion > sys.version_info:
self._msgs_state[msgid] = False
continue
if 'maxversion' in msg_tuple[3]:
maxversion = msg_tuple[3]['maxversion']
if maxversion <= sys.version_info:
self._msgs_state[msgid] = False
continue
else:
# messages should have a symbol, but for backward compatibility
# they may not.
(msg, msgdescr) = msg_tuple
warn("[pylint 0.26] description of message %s doesn't include "
"a symbolic name" % msgid, DeprecationWarning)
msgsymbol = None
# avoid duplicate / malformed ids
assert msgid not in self._messages, \
'Message id %r is already defined' % msgid
assert chkid is None or chkid == msgid[1:3], \
'Inconsistent checker part in message id %r' % msgid
chkid = msgid[1:3]
msg = MessageDefinition(checker, msgid, msg, msgdescr, msgsymbol, scope)
self._messages[msgid] = msg
self._messages_by_symbol[msgsymbol] = msg
self._msgs_by_category.setdefault(msgid[0], []).append(msgid)
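# For reference, a hypothetical checker.msgs dictionary accepted by
# register_messages() above (ids and names invented for illustration):
#
#     msgs = {
#         'W1201': ('Found a frobnicated widget',
#                   'frobnicated-widget',
#                   'Emitted when a widget has been frobnicated.'),
#         # old style, without a symbolic name (triggers the DeprecationWarning):
#         'W1202': ('Found an unfrobnicated widget',
#                   'Emitted when a widget lacks frobnication.'),
#     }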
def get_message_help(self, msgid, checkerref=False):
"""return the help string for the given message id"""
msg = self.check_message_id(msgid)
desc = normalize_text(' '.join(msg.descr.split()), indent=' ')
if checkerref:
desc += ' This message belongs to the %s checker.' % \
msg.checker.name
title = msg.msg
if msg.symbol:
symbol_part = ' (%s)' % msg.symbol
else:
symbol_part = ''
if title != '%s':
title = title.splitlines()[0]
return ':%s%s: *%s*\n%s' % (msg.msgid, symbol_part, title, desc)
return ':%s%s:\n%s' % (msg.msgid, symbol_part, desc)
def disable(self, msgid, scope='package', line=None):
"""don't output message of the given id"""
assert scope in ('package', 'module')
# handle disable=all by disabling all categories
if msgid == 'all':
for msgid in MSG_TYPES:
self.disable(msgid, scope, line)
return
# msgid is a category?
catid = category_id(msgid)
if catid is not None:
for _msgid in self._msgs_by_category.get(catid):
self.disable(_msgid, scope, line)
return
# msgid is a checker name?
if msgid.lower() in self._checkers:
for checker in self._checkers[msgid.lower()]:
for _msgid in checker.msgs:
if _msgid in self._messages:
self.disable(_msgid, scope, line)
return
# msgid is report id?
if msgid.lower().startswith('rp'):
self.disable_report(msgid)
return
# msgid is a symbolic or numeric msgid.
msg = self.check_message_id(msgid)
if scope == 'module':
assert line > 0
try:
self._module_msgs_state[msg.msgid][line] = False
except KeyError:
self._module_msgs_state[msg.msgid] = {line: False}
if msgid != 'I0011':
self.add_message('I0011', line=line, args=msg.msgid)
else:
msgs = self._msgs_state
msgs[msg.msgid] = False
# sync configuration object
self.config.disable_msg = [mid for mid, val in msgs.iteritems()
if not val]
def enable(self, msgid, scope='package', line=None):
"""reenable message of the given id"""
assert scope in ('package', 'module')
catid = category_id(msgid)
# msgid is a category?
if catid is not None:
for msgid in self._msgs_by_category.get(catid):
self.enable(msgid, scope, line)
return
# msgid is a checker name?
if msgid.lower() in self._checkers:
for checker in self._checkers[msgid.lower()]:
for msgid_ in checker.msgs:
self.enable(msgid_, scope, line)
return
# msgid is report id?
if msgid.lower().startswith('rp'):
self.enable_report(msgid)
return
# msgid is a symbolic or numeric msgid.
msg = self.check_message_id(msgid)
if scope == 'module':
assert line > 0
try:
self._module_msgs_state[msg.msgid][line] = True
except KeyError:
self._module_msgs_state[msg.msgid] = {line: True}
self.add_message('I0012', line=line, args=msg.msgid)
else:
msgs = self._msgs_state
msgs[msg.msgid] = True
# sync configuration object
self.config.enable = [mid for mid, val in msgs.iteritems() if val]
def check_message_id(self, msgid):
"""returns the Message object for this message.
msgid may be either a numeric or symbolic id.
Raises UnknownMessage if the message id is not defined.
"""
if msgid in self._messages_by_symbol:
return self._messages_by_symbol[msgid]
msgid = msgid.upper()
try:
return self._messages[msgid]
except KeyError:
raise UnknownMessage('No such message id %s' % msgid)
def get_msg_display_string(self, msgid):
"""Generates a user-consumable representation of a message.
Can be just the message ID or the ID and the symbol.
"""
return repr(self.check_message_id(msgid).symbol)
def get_message_state_scope(self, msgid, line=None):
"""Returns the scope at which a message was enabled/disabled."""
try:
if line in self._module_msgs_state[msgid]:
return MSG_STATE_SCOPE_MODULE
except (KeyError, TypeError):
return MSG_STATE_SCOPE_CONFIG
def is_message_enabled(self, msgid, line=None):
"""return true if the message associated to the given message id is
enabled
msgid may be either a numeric or symbolic message id.
"""
if msgid in self._messages_by_symbol:
msgid = self._messages_by_symbol[msgid].msgid
if line is None:
return self._msgs_state.get(msgid, True)
try:
return self._module_msgs_state[msgid][line]
except (KeyError, TypeError):
return self._msgs_state.get(msgid, True)
def handle_ignored_message(self, state_scope, msgid, line, node, args):
"""Report an ignored message.
state_scope is either MSG_STATE_SCOPE_MODULE or MSG_STATE_SCOPE_CONFIG,
depending on whether the message was disabled locally in the module,
or globally. The other arguments are the same as for add_message.
"""
if state_scope == MSG_STATE_SCOPE_MODULE:
try:
orig_line = self._suppression_mapping[(msgid, line)]
self._ignored_msgs.setdefault((msgid, orig_line), set()).add(line)
except KeyError:
pass
def add_message(self, msg_descr, line=None, node=None, args=None):
"""Adds a message given by ID or name.
        If provided, the message string is expanded using args.
        AST checkers must provide the node argument (but may optionally
        provide line if the line number is different), raw and token checkers
        must provide the line argument.
"""
msg_info = self.check_message_id(msg_descr)
msgid = msg_info.msgid
# Fatal messages and reports are special, the node/scope distinction
# does not apply to them.
if msgid[0] not in _SCOPE_EXEMPT:
if msg_info.scope == WarningScope.LINE:
assert node is None and line is not None, (
'Message %s must only provide line, got line=%s, node=%s' % (msgid, line, node))
elif msg_info.scope == WarningScope.NODE:
# Node-based warnings may provide an override line.
                assert node is not None, 'Message %s must provide Node, got None' % msgid
if line is None and node is not None:
line = node.fromlineno
if hasattr(node, 'col_offset'):
col_offset = node.col_offset # XXX measured in bytes for utf-8, divide by two for chars?
else:
col_offset = None
# should this message be displayed
if not self.is_message_enabled(msgid, line):
self.handle_ignored_message(
self.get_message_state_scope(msgid, line), msgid, line, node, args)
return
# update stats
msg_cat = MSG_TYPES[msgid[0]]
self.msg_status |= MSG_TYPES_STATUS[msgid[0]]
self.stats[msg_cat] += 1
self.stats['by_module'][self.current_name][msg_cat] += 1
try:
self.stats['by_msg'][msgid] += 1
except KeyError:
self.stats['by_msg'][msgid] = 1
# expand message ?
msg = msg_info.msg
if args:
msg %= args
# get module and object
if node is None:
module, obj = self.current_name, ''
path = self.current_file
else:
module, obj = get_module_and_frameid(node)
path = node.root().file
# add the message
self.reporter.add_message(msgid, (path, module, obj, line or 1, col_offset or 0), msg)
def help_message(self, msgids):
"""display help messages for the given message identifiers"""
for msgid in msgids:
try:
print self.get_message_help(msgid, True)
print
except UnknownMessage, ex:
print ex
print
continue
def print_full_documentation(self):
"""output a full documentation in ReST format"""
by_checker = {}
for checker in self.get_checkers():
if checker.name == 'master':
prefix = 'Main '
print "Options"
print '-------\n'
if checker.options:
for section, options in checker.options_by_section():
if section is None:
title = 'General options'
else:
title = '%s options' % section.capitalize()
print title
print '~' * len(title)
rest_format_section(sys.stdout, None, options)
print
else:
try:
by_checker[checker.name][0] += checker.options_and_values()
by_checker[checker.name][1].update(checker.msgs)
by_checker[checker.name][2] += checker.reports
except KeyError:
by_checker[checker.name] = [list(checker.options_and_values()),
dict(checker.msgs),
list(checker.reports)]
for checker, (options, msgs, reports) in by_checker.iteritems():
prefix = ''
title = '%s checker' % checker
print title
print '-' * len(title)
print
if options:
title = 'Options'
print title
print '~' * len(title)
rest_format_section(sys.stdout, None, options)
print
if msgs:
title = ('%smessages' % prefix).capitalize()
print title
print '~' * len(title)
for msgid in sort_msgs(msgs.iterkeys()):
print self.get_message_help(msgid, False)
print
if reports:
title = ('%sreports' % prefix).capitalize()
print title
print '~' * len(title)
for report in reports:
print ':%s: %s' % report[:2]
print
print
def list_messages(self):
"""output full messages list documentation in ReST format"""
msgids = []
for msgid in self._messages:
msgids.append(msgid)
msgids.sort()
for msgid in msgids:
print self.get_message_help(msgid, False)
print
class ReportsHandlerMixIn(object):
"""a mix-in class containing all the reports and stats manipulation
related methods for the main lint class
"""
def __init__(self):
self._reports = {}
self._reports_state = {}
def register_report(self, reportid, r_title, r_cb, checker):
"""register a report
reportid is the unique identifier for the report
r_title the report's title
r_cb the method to call to make the report
checker is the checker defining the report
"""
reportid = reportid.upper()
self._reports.setdefault(checker, []).append( (reportid, r_title, r_cb) )
def enable_report(self, reportid):
"""disable the report of the given id"""
reportid = reportid.upper()
self._reports_state[reportid] = True
def disable_report(self, reportid):
"""disable the report of the given id"""
reportid = reportid.upper()
self._reports_state[reportid] = False
def report_is_enabled(self, reportid):
"""return true if the report associated to the given identifier is
enabled
"""
return self._reports_state.get(reportid, True)
def make_reports(self, stats, old_stats):
"""render registered reports"""
sect = Section('Report',
'%s statements analysed.'% (self.stats['statement']))
for checker in self._reports:
for reportid, r_title, r_cb in self._reports[checker]:
if not self.report_is_enabled(reportid):
continue
report_sect = Section(r_title)
try:
r_cb(report_sect, stats, old_stats)
except EmptyReport:
continue
report_sect.report_id = reportid
sect.append(report_sect)
return sect
def add_stats(self, **kwargs):
"""add some stats entries to the statistic dictionary
raise an AssertionError if there is a key conflict
"""
for key, value in kwargs.iteritems():
if key[-1] == '_':
key = key[:-1]
assert key not in self.stats
self.stats[key] = value
return self.stats
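# Illustrative sketch (not part of the original code): a linter class mixing
# in ReportsHandlerMixIn would typically register a report callback and later
# render it, roughly as:
#
#   self.register_report('RP0001', 'Statistics by type', report_cb, checker)
#   sect = self.make_reports(self.stats, old_stats)
#
# where report_cb(report_sect, stats, old_stats) fills in the section or
# raises EmptyReport to skip it; 'RP0001' and report_cb are example names,
# not taken from this file.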
def expand_modules(files_or_modules, black_list):
"""take a list of files/modules/packages and return the list of tuple
(file, module name) which have to be actually checked
"""
result = []
errors = []
for something in files_or_modules:
if exists(something):
# this is a file or a directory
try:
modname = '.'.join(modpath_from_file(something))
except ImportError:
modname = splitext(basename(something))[0]
if isdir(something):
filepath = join(something, '__init__.py')
else:
filepath = something
else:
# suppose it's a module or package
modname = something
try:
filepath = file_from_modpath(modname.split('.'))
if filepath is None:
errors.append( {'key' : 'F0003', 'mod': modname} )
continue
except (ImportError, SyntaxError), ex:
# FIXME p3k : the SyntaxError is a Python bug and should be
# removed as soon as possible http://bugs.python.org/issue10588
errors.append( {'key': 'F0001', 'mod': modname, 'ex': ex} )
continue
filepath = normpath(filepath)
result.append( {'path': filepath, 'name': modname,
'basepath': filepath, 'basename': modname} )
if not (modname.endswith('.__init__') or modname == '__init__') \
and '__init__.py' in filepath:
for subfilepath in get_module_files(dirname(filepath), black_list):
if filepath == subfilepath:
continue
submodname = '.'.join(modpath_from_file(subfilepath))
result.append( {'path': subfilepath, 'name': submodname,
'basepath': filepath, 'basename': modname} )
return result, errors
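# Illustrative sketch (not part of the original code): for a hypothetical
# package 'pkg' containing '__init__.py' and 'mod.py',
# expand_modules(['pkg'], []) would return something like:
#
#   ([{'path': 'pkg/__init__.py', 'name': 'pkg',
#      'basepath': 'pkg/__init__.py', 'basename': 'pkg'},
#     {'path': 'pkg/mod.py', 'name': 'pkg.mod',
#      'basepath': 'pkg/__init__.py', 'basename': 'pkg'}],
#    [])
#
# with the second element of the tuple collecting error dicts for modules
# that could not be resolved.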
class PyLintASTWalker(object):
def __init__(self, linter):
# callbacks per node types
self.nbstatements = 1
self.visit_events = {}
self.leave_events = {}
self.linter = linter
def _is_method_enabled(self, method):
if not hasattr(method, 'checks_msgs'):
return True
for msg_desc in method.checks_msgs:
if self.linter.is_message_enabled(msg_desc):
return True
return False
def add_checker(self, checker):
"""walk to the checker's dir and collect visit and leave methods"""
# XXX : should be possible to merge needed_checkers and add_checker
vcids = set()
lcids = set()
visits = self.visit_events
leaves = self.leave_events
for member in dir(checker):
cid = member[6:]
if cid == 'default':
continue
if member.startswith('visit_'):
v_meth = getattr(checker, member)
# don't use visit_methods with no activated message:
if self._is_method_enabled(v_meth):
visits.setdefault(cid, []).append(v_meth)
vcids.add(cid)
elif member.startswith('leave_'):
l_meth = getattr(checker, member)
# don't use leave_methods with no activated message:
if self._is_method_enabled(l_meth):
leaves.setdefault(cid, []).append(l_meth)
lcids.add(cid)
visit_default = getattr(checker, 'visit_default', None)
if visit_default:
for cls in nodes.ALL_NODE_CLASSES:
cid = cls.__name__.lower()
if cid not in vcids:
visits.setdefault(cid, []).append(visit_default)
# for now we have no "leave_default" method in Pylint
def walk(self, astroid):
"""call visit events of astroid checkers for the given node, recurse on
its children, then leave events.
"""
cid = astroid.__class__.__name__.lower()
if astroid.is_statement:
self.nbstatements += 1
# generate events for this node on each checker
for cb in self.visit_events.get(cid, ()):
cb(astroid)
# recurse on children
for child in astroid.get_children():
self.walk(child)
for cb in self.leave_events.get(cid, ()):
cb(astroid)
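# Illustrative sketch (not part of the original code): a checker registered
# through PyLintASTWalker.add_checker() that defines, say, visit_callfunc()
# and leave_module() gets those methods stored in visit_events/leave_events,
# so that walk() calls visit_callfunc(node) for every node whose lowercased
# class name is 'callfunc' and leave_module(node) when leaving each 'module'
# node, provided at least one message in the method's checks_msgs attribute
# is enabled. The method names follow the visit_<nodename>/leave_<nodename>
# convention and are examples, not names taken from this file.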
PY_EXTS = ('.py', '.pyc', '.pyo', '.pyw', '.so', '.dll')
def register_plugins(linter, directory):
"""load all module and package in the given directory, looking for a
'register' function in each one, used to register pylint checkers
"""
imported = {}
for filename in os.listdir(directory):
base, extension = splitext(filename)
if base in imported or base == '__pycache__':
continue
if extension in PY_EXTS and base != '__init__' or (
not extension and isdir(join(directory, base))):
try:
module = load_module_from_file(join(directory, filename))
except ValueError:
# empty module name (usually emacs auto-save files)
continue
except ImportError, exc:
print >> sys.stderr, "Problem importing module %s: %s" % (filename, exc)
else:
if hasattr(module, 'register'):
module.register(linter)
imported[base] = 1
|
lukaszpiotr/pylama_with_gjslint
|
pylama/checkers/pylint/utils.py
|
Python
|
lgpl-3.0
| 26,254
|
[
"VisIt"
] |
80859c3241fe28587e26379bd5099adaa31cb96bcfb2dc11a742a41721d0be0d
|
import os
import sys
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import shutil
from generate_tool_list_from_ga_workflow_files import generate_tool_list_from_workflow
from string import Template
INVENTORY_FILE_TEMPLATE = '''
[GKSfromWorkflow]
localhost ansible_connection=local
# if remote target, replace the above line by
# <remote host IP> ansible_ssh_user="root" ansible_ssh_private_key_file="~/.ssh/somekey"
'''
GROUP_VAR_FILE_TEMPLATE = '''
galaxy_tools_tool_list_files:
- "extra-files/GKSfromWorkflow/GKSfromWorkflow_tool_list.yml"
galaxy_tools_workflows:
$workflow_list
galaxy_web_processes: 2
additional_files_list:
- { src: "extra-files/galaxy-kickstart/welcome.html", dest: "{{ galaxy_server_dir }}/static/" }
- { src: "extra-files/galaxy-kickstart/galaxy-kickstart_logo.png", dest: "{{ galaxy_server_dir }}/static/images/" }
- { src: "extra-files/tool_sheds_conf.xml", dest: "{{ galaxy_config_dir }}" }
'''
def _parse_cli_options():
"""
Parse command line options, returning `parse_args` from `ArgumentParser`.
"""
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
usage="python %(prog)s <options>",
epilog="example:\n"
"python %(prog)s -w workflow1 workflow2 -l my_panel_label\n"
"Christophe Antoniewski <drosofff@gmail.com>\n"
"https://github.com/ARTbio/ansible-artimed/tree/master/scritps/galaxykickstart_from_workflow.py")
parser.add_argument('-w', '--workflow',
dest="workflow_files",
required=True,
nargs='+',
help='A space-separated list of galaxy workflow description files in json format', )
parser.add_argument('-l', '--panel_label',
dest='panel_label',
default='Tools from workflows',
                        help='The name of the panel where the tools will show up in Galaxy. '
'If not specified: "Tools from workflows"')
return parser.parse_args()
def makedir (path):
if os.path.isdir(path):
shutil.rmtree(path)
os.makedirs(path)
def make_inventory (file_path="../inventory_files/GKSfromWorkflow"):
if not os.path.exists (file_path):
open(file_path, "w").write(INVENTORY_FILE_TEMPLATE)
else:
print("GKSfromWorkflow inventory file exists, file unchanged")
def make_groupvars (workflow_file_list, file_path="../group_vars/GKSfromWorkflow"):
if os.path.exists(file_path):
print("The GKSfromWorkflow group_vars file already existed and has been overwritten")
internal_workflow_list = []
for workflow in workflow_file_list:
workflow = os.path.basename(workflow)
workflow = ' - "extra-files/GKSfromWorkflow/' + workflow + '"'
internal_workflow_list.append(workflow)
workflow_list = "\n".join(internal_workflow_list)
template_params = {"workflow_list": workflow_list}
config_contents = Template(GROUP_VAR_FILE_TEMPLATE).substitute(template_params)
open(file_path, "w").write(config_contents)
def make_extra_files (workflow_files, panel_label, tool_list_file, extra_files_dir="../extra-files/GKSfromWorkflow"):
if os.path.exists(extra_files_dir):
print("The extra-files/GKSfromWorkflow directory already existed and has been overwritten")
makedir(extra_files_dir)
generate_tool_list_from_workflow(workflow_files, panel_label, tool_list_file)
os.rename(tool_list_file, extra_files_dir + "/" + tool_list_file)
for workflow in workflow_files:
workflow_basename = os.path.basename(workflow)
shutil.copyfile (workflow, extra_files_dir + "/" + workflow_basename)
def create_gks_flavor (workflow_file_list, panel_label, tool_list_file):
"""
creates inventory files in inventory_files folder
creates a group_vars "gks_workflows" in group_vars folder
creates a tool_list.yml files from galaxy workflow files and
copy these files in and extra-files/gks_workflows folder
"""
make_inventory()
make_groupvars(workflow_file_list)
make_extra_files (workflow_file_list, panel_label, tool_list_file)
def main():
options = _parse_cli_options()
create_gks_flavor (options.workflow_files, options.panel_label, "GKSfromWorkflow_tool_list.yml")
if __name__ == "__main__":
main()
|
afgane/GalaxyKickStart
|
scripts/galaxykickstart_from_workflow.py
|
Python
|
gpl-3.0
| 4,495
|
[
"Galaxy"
] |
3d9cce7ab0c36bd1e702850581821d2869a5dec41b8486d3f5a0c4bc0f3369ee
|
# TODO: Add tests based on taxonomy, once we know how to mock mysql.
import six
import platform
from six.moves import builtins
from copy import deepcopy
from json import dumps
from unittest import TestCase
import sqlite3
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from six import StringIO
from Bio import SeqIO
from ..mocking import mockOpen, File
from .sample_data import PARAMS, RECORD0, RECORD1, RECORD2, RECORD3, RECORD4
from dark.reads import Read, Reads, DNARead
from dark.hsp import HSP, LSP
from dark.score import LowerIsBetterScore
from dark.blast.alignments import (
BlastReadsAlignments, ZERO_EVALUE_UPPER_RANDOM_INCREMENT)
from dark.titles import TitlesAlignments
from dark import ncbidb
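# Illustrative note (not part of the original tests): the mocked 'file.json'
# contents used below follow a JSON-lines layout, a first line holding the
# BLAST parameters followed by one record per read, e.g.:
#
#   {"application": "BLASTN", ...}           <- dumps(PARAMS)
#   {"query": "id0", "alignments": [...]}    <- dumps(RECORD0)
#
# The field names shown are taken from the inline records defined later in
# this file (e.g. in testOneAlignmentPerRead); PARAMS and RECORD0 themselves
# come from sample_data.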
class TestBlastReadsAlignments(TestCase):
"""
Test the BlastReadsAlignments class.
"""
def testEmptyJSONInput(self):
"""
When a JSON input file is empty, a C{ValueError} must be raised
on trying to read it.
"""
mockOpener = mockOpen()
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
error = "JSON file 'file.json' was empty."
six.assertRaisesRegex(self, ValueError, error,
BlastReadsAlignments, reads, 'file.json')
def testNonJSONInput(self):
"""
When given a file whose contents are not JSON, attempting to
read the BLAST hits from it must raise a C{ValueError}.
"""
pypy = platform.python_implementation() == 'PyPy'
mockOpener = mockOpen(read_data='not JSON\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
if six.PY3:
error = (
"^Could not convert first line of 'file\.json' to JSON "
"\(Expecting value: line 1 column 1 \(char 0\)\)\. "
"Line is 'not JSON'\.$")
else:
if pypy:
error = (
"^Could not convert first line of 'file\.json' to "
"JSON \(Error when decoding null at char 1\)\. Line "
"is 'not JSON'\.$")
else:
error = (
"^Could not convert first line of 'file\.json' to "
"JSON \(No JSON object could be decoded\)\. Line is "
"'not JSON'\.$")
six.assertRaisesRegex(self, ValueError, error,
BlastReadsAlignments, reads, 'file.json')
def testScoreTitle_Bits(self):
"""
The score title must be correct when we are using bit scores.
"""
mockOpener = mockOpen(read_data=dumps(PARAMS) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = BlastReadsAlignments(reads, 'file.json')
self.assertEqual('Bit score', readsAlignments.params.scoreTitle)
def testScoreTitle_EValue(self):
"""
The score title must be correct when we are using e values.
"""
mockOpener = mockOpen(read_data=dumps(PARAMS) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = BlastReadsAlignments(
reads, 'file.json', scoreClass=LowerIsBetterScore)
self.assertEqual('$- log_{10}(e)$',
readsAlignments.params.scoreTitle)
def testNucleotidesBlastn(self):
"""
The nucleotide type of the subject must be correct when we are using
blastn.
"""
mockOpener = mockOpen(read_data=dumps(PARAMS) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = BlastReadsAlignments(reads, 'file.json')
self.assertEqual('blastn', readsAlignments.params.application)
self.assertTrue(readsAlignments.params.subjectIsNucleotides)
def testNucleotidesTblastx(self):
"""
The nucleotide type of the subject must be correct when we are using
tblastx.
"""
params = deepcopy(PARAMS)
params['application'] = 'tblastx'
mockOpener = mockOpen(read_data=dumps(params) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = BlastReadsAlignments(reads, 'file.json')
self.assertEqual('tblastx', readsAlignments.params.application)
self.assertTrue(readsAlignments.params.subjectIsNucleotides)
def testNucleotidesBlastx(self):
"""
The nucleotide type of the subject must be correct when we are using
blastx.
"""
params = deepcopy(PARAMS)
params['application'] = 'blastx'
mockOpener = mockOpen(read_data=dumps(params) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = BlastReadsAlignments(reads, 'file.json')
self.assertEqual('blastx', readsAlignments.params.application)
self.assertFalse(readsAlignments.params.subjectIsNucleotides)
def testApplicationParams(self):
"""
BLAST parameters must be extracted from the input JSON file and stored
correctly.
"""
mockOpener = mockOpen(read_data=dumps(PARAMS) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = BlastReadsAlignments(reads, 'file.json')
self.assertEqual(PARAMS, readsAlignments.params.applicationParams)
def testJSONParamsButNoHits(self):
"""
When BLAST parameters are present in the input but there are no
records, the __iter__ method of a L{BlastReadsAlignments} instance must
not yield anything.
"""
mockOpener = mockOpen(read_data=dumps(PARAMS) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = BlastReadsAlignments(reads, 'file.json')
self.assertEqual([], list(readsAlignments))
def testNotEnoughReads(self):
"""
If a JSON file contains a parameters section and one hit, but there
is no read to go with the hit, a C{ValueError} must be raised.
"""
mockOpener = mockOpen(
read_data=dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
error = ("Read generator failed to yield read number 1 during "
"parsing of BLAST file 'file\.json'\.")
readsAlignments = BlastReadsAlignments(reads, 'file.json')
six.assertRaisesRegex(self, ValueError, error, list,
readsAlignments)
def testTooManyReads(self):
"""
If a JSON file contains a parameters section and one hit, but there
is more than one read, a C{ValueError} must be raised.
"""
mockOpener = mockOpen(
read_data=dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'G' * 70))
error = ("Reads iterator contained more reads than the number of "
"BLAST records found \(1\)\. First unknown read id is "
"'id1'\.")
readsAlignments = BlastReadsAlignments(reads, 'file.json')
six.assertRaisesRegex(self, ValueError, error, list,
readsAlignments)
def testIncorrectReadId(self):
"""
If the query id of a hit does not match the id of the corresponding
input read, a C{ValueError} must be raised.
"""
mockOpener = mockOpen(
read_data=dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('not id0', 'A' * 70))
error = ("The reads you have provided do not match the BLAST "
"output: BLAST record query id \(id0\) does "
"not match the id of the supposedly corresponding read "
"\(not id0\)\.")
readsAlignments = BlastReadsAlignments(reads, 'file.json')
six.assertRaisesRegex(self, ValueError, error, list,
readsAlignments)
def testOneJSONInput(self):
"""
If a JSON file contains a parameters section and one record, it must
be read correctly.
"""
result = File([dumps(PARAMS) + '\n', dumps(RECORD0) + '\n'])
with patch.object(builtins, 'open') as mockMethod:
mockMethod.return_value = result
reads = Reads()
reads.add(Read('id0', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
self.assertEqual(1, len(list(readsAlignments)))
def testTwoJSONInputs(self):
"""
If two JSON files are passed to L{BlastReadsAlignments} each with a
parameters section and one record, both records must be read correctly
and the result should have 2 records.
"""
class SideEffect(object):
def __init__(self):
self.first = True
def sideEffect(self, _ignoredFilename, **kwargs):
if self.first:
self.first = False
return File([dumps(PARAMS) + '\n', dumps(RECORD0) + '\n'])
else:
return File([dumps(PARAMS) + '\n', dumps(RECORD1) + '\n'])
sideEffect = SideEffect()
with patch.object(builtins, 'open') as mockMethod:
mockMethod.side_effect = sideEffect.sideEffect
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
readsAlignments = BlastReadsAlignments(
reads, ['file1.json', 'file2.json'])
result = list(readsAlignments)
self.assertEqual(2, len(result))
self.assertEqual('id0', result[0].read.id)
self.assertEqual('id1', result[1].read.id)
def testTwoJSONInputsWithSubjectInCommon(self):
"""
If two JSON files are passed to L{BlastReadsAlignments} with a matched
subject in common, the title should be in the alignments for both
reads.
"""
class SideEffect(object):
def __init__(self):
self.first = True
def sideEffect(self, _ignoredFilename, **kwargs):
if self.first:
self.first = False
return File([dumps(PARAMS) + '\n', dumps(RECORD2) + '\n'])
else:
return File([dumps(PARAMS) + '\n', dumps(RECORD3) + '\n'])
sideEffect = SideEffect()
with patch.object(builtins, 'open') as mockMethod:
mockMethod.side_effect = sideEffect.sideEffect
reads = Reads()
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(
reads, ['file1.json', 'file2.json'])
result = list(readsAlignments)
self.assertEqual(2, len(result))
self.assertEqual('id2', result[0].read.id)
self.assertEqual('gi|887699|gb|DQ37780 Cowpox virus 15',
result[0][0].subjectTitle)
self.assertEqual('id3', result[1].read.id)
self.assertEqual('gi|887699|gb|DQ37780 Cowpox virus 15',
result[1][0].subjectTitle)
def testThreeJSONInputs(self):
"""
If three JSON files are passed to L{BlastReadsAlignments} with names
that have a numeric prefix and each with a parameters section and one
record, all records must be read correctly and the result should have
3 records in the correct order.
"""
class SideEffect(object):
def __init__(self, test):
self.test = test
self.count = 0
def sideEffect(self, filename, **kwargs):
if self.count == 0:
self.test.assertEqual('1.json', filename)
self.count += 1
return File([dumps(PARAMS) + '\n', dumps(RECORD0) + '\n'])
elif self.count == 1:
self.test.assertEqual('2.json', filename)
self.count += 1
return File([dumps(PARAMS) + '\n', dumps(RECORD1) + '\n'])
else:
self.test.assertEqual('3.json', filename)
return File([dumps(PARAMS) + '\n', dumps(RECORD2) + '\n'])
sideEffect = SideEffect(self)
with patch.object(builtins, 'open') as mockMethod:
mockMethod.side_effect = sideEffect.sideEffect
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
# Note the files are given out of order. Their names will be
# sorted before they are opened. The sorting of the names is
# verified in the SideEffect class, above.
readsAlignments = BlastReadsAlignments(
reads, ['3.json', '1.json', '2.json'])
result = list(readsAlignments)
self.assertEqual(3, len(result))
self.assertEqual('id0', result[0].read.id)
self.assertEqual('id1', result[1].read.id)
self.assertEqual('id2', result[2].read.id)
def testIncompatibleParameters(self):
"""
If two compressed (bz2) JSON files with incompatible parameters
are given to L{BlastReadsAlignments}, a C{ValueError} must be
raised when the files are read.
"""
class SideEffect(object):
def __init__(self):
self.first = True
def sideEffect(self, _ignoredFilename):
if self.first:
self.first = False
return File([dumps(PARAMS) + '\n', dumps(RECORD0) + '\n'])
else:
params = deepcopy(PARAMS)
params['application'] = 'Skype'
return File([dumps(params) + '\n', dumps(RECORD1) + '\n'])
sideEffect = SideEffect()
with patch.object(builtins, 'open') as mockMethod:
mockMethod.side_effect = sideEffect.sideEffect
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
if six.PY3:
error = (
"^Incompatible BLAST parameters found\. The parameters "
"in file2\.json differ from those originally found "
"in file1\.json. Summary of differences:\n\tParam "
"'application' initial value 'BLASTN' differs from "
"later value 'Skype'$")
else:
# Python 2 prints a 'u' before the repr of strings in the error
# message. In Python 3 all strings are unicode.
error = (
"^Incompatible BLAST parameters found\. The parameters "
"in file2\.json differ from those originally found "
"in file1\.json. Summary of differences:\n\tParam "
"u'application' initial value u'BLASTN' differs from "
"later value u'Skype'$")
readsAlignments = BlastReadsAlignments(
reads, ['file1.json', 'file2.json'])
six.assertRaisesRegex(self, ValueError, error, list,
readsAlignments)
def testGetSubjectSequenceBlastdbcmd(self):
"""
The getSubjectSequence function must return a correct C{DNARead}
instance with a 'sequence' attribute that is a string when the
sequence is fetched using ncbidb.getSequence.
"""
mockOpener = mockOpen(read_data=dumps(PARAMS) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = BlastReadsAlignments(reads, 'file.json')
with patch.object(ncbidb, 'getSequence') as mockMethod:
mockMethod.return_value = SeqIO.read(
StringIO('>id1 Description\nAA\n'), 'fasta')
sequence = readsAlignments.getSubjectSequence('title')
self.assertIsInstance(sequence, DNARead)
self.assertIsInstance(sequence.sequence, str)
self.assertEqual('id1 Description', sequence.id)
self.assertEqual('AA', sequence.sequence)
def testGetSubjectSequenceFASTADatabase(self):
"""
The getSubjectSequence function must return the correct C{DNARead}
instance when a FASTA database filename is given to the
BlastReadsAlignments constructor.
"""
class SideEffect(object):
def __init__(self, test):
self.test = test
self.count = 0
def sideEffect(self, filename, mode='r'):
if self.count == 0:
self.test.assertEqual('file.json', filename)
self.count += 1
return File([dumps(PARAMS) + '\n', dumps(RECORD0) + '\n'])
elif self.count == 1:
self.count += 1
return File(['>id1 Description', 'AA\n'])
else:
self.fail('Unexpected third call to open.')
sideEffect = SideEffect(self)
with patch.object(builtins, 'open') as mockMethod:
mockMethod.side_effect = sideEffect.sideEffect
reads = Reads()
readsAlignments = BlastReadsAlignments(
reads, 'file.json', databaseFilename='database.fasta')
subject = readsAlignments.getSubjectSequence('id1 Description')
self.assertIsInstance(subject, DNARead)
self.assertIsInstance(subject.sequence, str)
self.assertEqual('id1 Description', subject.id)
self.assertEqual('AA', subject.sequence)
@patch('os.path.exists')
def testGetSubjectSequenceSqliteDatabase(self, existsMock):
"""
The getSubjectSequence function must return the correct C{DNARead}
instance when a FASTA sqlite database filename is given to the
BlastReadsAlignments constructor.
"""
class ConnectSideEffect(object):
def __init__(self):
connection = sqlite3.connect(':memory:')
cur = connection.cursor()
cur.executescript('''
CREATE TABLE files (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name VARCHAR UNIQUE
);
CREATE TABLE sequences (
id VARCHAR UNIQUE PRIMARY KEY,
fileNumber INTEGER,
offset INTEGER
);
''')
cur.execute('INSERT INTO files(name) VALUES (?)',
('xxx.fasta',))
fileNumber = cur.lastrowid
cur.execute(
'INSERT INTO sequences(id, '
'fileNumber, offset) VALUES (?, ?, ?)',
('seqid', fileNumber, 7))
connection.commit()
self.connection = connection
def sideEffect(self, filename):
return self.connection
class OpenSideEffect(object):
def __init__(self, test):
self.test = test
self.count = 0
def sideEffect(self, filename, mode='r'):
if self.count == 0:
self.test.assertEqual('file.json', filename)
self.count += 1
return File([dumps(PARAMS) + '\n', dumps(RECORD0) + '\n'])
elif self.count == 1:
self.test.assertEqual('xxx.fasta', filename)
self.count += 1
return File(['>seqid\n', 'AA\n'])
else:
self.fail('Unexpected third call to open.')
connectSideEffect = ConnectSideEffect()
with patch.object(sqlite3, 'connect') as mockMethod:
mockMethod.side_effect = connectSideEffect.sideEffect
openSideEffect = OpenSideEffect(self)
with patch.object(builtins, 'open') as mockMethod:
mockMethod.side_effect = openSideEffect.sideEffect
reads = Reads()
readsAlignments = BlastReadsAlignments(
reads, 'file.json', sqliteDatabaseFilename='dummy')
subject = readsAlignments.getSubjectSequence('seqid')
self.assertIsInstance(subject, DNARead)
self.assertIsInstance(subject.sequence, str)
self.assertEqual('seqid', subject.id)
self.assertEqual('AA', subject.sequence)
def testGetSubjectSequenceThenReverseComplement(self):
"""
It must be possible to call reverseComplement on the return
result of getSubjectSequence and obtain a correct C{DNARead} result.
"""
mockOpener = mockOpen(read_data=dumps(PARAMS) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = BlastReadsAlignments(reads, 'file.json')
with patch.object(ncbidb, 'getSequence') as mockMethod:
mockMethod.return_value = SeqIO.read(
StringIO('>id1 Description\nACGAT\n'), 'fasta')
sequence = readsAlignments.getSubjectSequence('title')
rc = sequence.reverseComplement()
self.assertIsInstance(rc, DNARead)
self.assertIsInstance(rc.sequence, str)
self.assertEqual('id1 Description', rc.id)
self.assertEqual('ATCGT', rc.sequence)
def testHsps(self):
"""
The hsps function must yield the HSPs.
"""
# adjustHspsForPlotting changes HSPs in place, so we pass copied
# records so we don't mess up other tests.
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
self.assertEqual(
sorted([HSP(20), HSP(25), HSP(20), HSP(20), HSP(20), HSP(20)]),
sorted(readsAlignments.hsps()))
def testAdjustHspsForPlotting_EValueNoZero(self):
"""
The adjustHspsForPlotting function must alter HSPs so that non-zero
evalues are converted to the positive value of their negative exponent.
"""
result = lambda a, **kwargs: File([
dumps(PARAMS) + '\n', dumps(deepcopy(RECORD0)) + '\n',
dumps(deepcopy(RECORD1)) + '\n', dumps(deepcopy(RECORD2)) + '\n',
dumps(deepcopy(RECORD3)) + '\n'])
with patch.object(builtins, 'open') as mockMethod:
mockMethod.side_effect = result
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(
reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
title = 'gi|887699|gb|DQ37780 Cowpox virus 15'
titleAlignments = titlesAlignments[title]
readsAlignments.adjustHspsForPlotting(titleAlignments)
hsps = sorted(titleAlignments.hsps())
self.assertEqual([6.0, 5.0], [hsp.score.score for hsp in hsps])
def testAdjustHspsForPlotting_EValueWithZero(self):
"""
The adjustHspsForPlotting function must alter HSPs so that zero
evalues are set randomly high.
"""
result = lambda a, **kwargs: File([
dumps(PARAMS) + '\n', dumps(deepcopy(RECORD0)) + '\n',
dumps(deepcopy(RECORD1)) + '\n', dumps(deepcopy(RECORD2)) + '\n',
dumps(deepcopy(RECORD3)) + '\n', dumps(deepcopy(RECORD4)) + '\n'])
with patch.object(builtins, 'open') as mockMethod:
mockMethod.side_effect = result
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
reads.add(Read('id4', 'A' * 70))
readsAlignments = BlastReadsAlignments(
reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
title = 'gi|887699|gb|DQ37780 Cowpox virus 15'
titleAlignments = titlesAlignments[title]
readsAlignments.adjustHspsForPlotting(titleAlignments)
hsps = sorted(titleAlignments.hsps())
# All we really know is that the first HSP will have a randomly
# high value whose bounds we can check. The other values are
# predictable.
self.assertTrue(LSP(6.0 + 2) > hsps[0] >
LSP(6.0 + 2 + ZERO_EVALUE_UPPER_RANDOM_INCREMENT))
self.assertEqual([6.0, 5.0, 3.0, 2.0],
[hsp.score.score for hsp in hsps[1:]])
class TestBlastReadsAlignmentsFiltering(TestCase):
"""
Test the BlastReadsAlignments class filter function.
"""
def testNoResultNoFilteringArgs(self):
"""
If the L{BlastReadsAlignments} filter function is called with no
arguments, and there are no hits, it should produce a generator
that yields no result.
"""
mockOpener = mockOpen(read_data=dumps(PARAMS) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter())
self.assertEqual(0, len(result))
def testOneHitNoFilteringArgs(self):
"""
If the L{BlastReadsAlignments} filter function is called with no
arguments, and there is one hit, it should produce a generator that
yields that hit.
"""
mockOpener = mockOpen(read_data=dumps(PARAMS) + '\n' +
dumps(RECORD0) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter())
self.assertEqual(1, len(result))
self.assertEqual('id0', result[0].read.id)
def testLimitZero(self):
"""
        If L{BlastReadsAlignments} is limited to zero results, that limit must
be respected.
"""
mockOpener = mockOpen(read_data=dumps(PARAMS) + '\n' +
dumps(RECORD0) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(limit=0))
self.assertEqual(0, len(result))
def testLimitOne(self):
"""
If L{BlastReadsAlignments} is limited to one hit, that limit must
be respected.
"""
mockOpener = mockOpen(read_data=dumps(PARAMS) + '\n' +
dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(limit=1))
self.assertEqual(1, len(result))
self.assertEqual('id0', result[0].read.id)
def testOneAlignmentPerRead(self):
"""
If L{BlastReadsAlignments} is asked to deliver only the best alignment
for each read, that must be respected.
"""
record = {
"query": "H6E8I1T01BFUH9",
"alignments": [
{
"length": 2885,
"hsps": [
{
"sbjct_end": 2506,
"expect": 1.25854e-43,
"sbjct": "AATCCAGGGAATGAATAAAATAATCATTAGCAGTAACAA",
"sbjct_start": 2607,
"query": "AATCCAGGGAATAAA-TAATCATTAGCAGTAACAA",
"frame": [1, -1],
"query_end": 462,
"bits": 182.092,
"query_start": 362
}
],
"title": "Merkel1"
},
{
"length": 2220,
"hsps": [
{
"sbjct_end": 1841,
"expect": 1.25854e-43,
"sbjct": "AATCCAGGGAATCTAATAAAATAATCAA",
"sbjct_start": 1942,
"query": "AATCCAGGGAATCTTAAA-TAATCATTAGCAGTAACAA",
"frame": [1, -1],
"query_end": 462,
"bits": 180,
"query_start": 362
}
],
"title":"Merkel2"
}
]
}
mockOpener = mockOpen(read_data=dumps(PARAMS) + '\n' +
dumps(record) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('H6E8I1T01BFUH9', 'A' * 500))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(oneAlignmentPerRead=True))
self.assertEqual(1, len(result))
self.assertEqual(1, len(result[0]))
self.assertEqual('Merkel1', result[0][0].subjectTitle)
def testMaxZeroAlignmentsPerRead(self):
"""
If L{BlastReadsAlignments} is asked to deliver only reads that have at
most zero alignments, a read with no alignments must be allowed
through but a read with one alignment must be filtered out.
"""
record1 = {
"query": "read1",
"alignments": [],
}
record2 = {
"query": "read2",
"alignments": [
{
"length": 2885,
"hsps": [
{
"sbjct_end": 2506,
"expect": 1.25854e-43,
"sbjct": "AATCCAGGGAATGAATAAAATAATCATTAGCAGTAACAA",
"sbjct_start": 2607,
"query": "AATCCAGGGAATAAA-TAATCATTAGCAGTAACAA",
"frame": [1, -1],
"query_end": 462,
"bits": 182.092,
"query_start": 362
}
],
"title": "Merkel1"
},
],
}
mockOpener = mockOpen(read_data=dumps(PARAMS) + '\n' +
dumps(record1) + '\n' +
dumps(record2) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('read1', 'A' * 500))
reads.add(Read('read2', 'G' * 500))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(maxAlignmentsPerRead=0))
self.assertEqual(1, len(result))
self.assertEqual('read1', result[0].read.id)
def testMaxOneAlignmentPerRead(self):
"""
If L{BlastReadsAlignments} is asked to deliver only reads that have at
most one alignment, a read with two alignments must be filtered out.
"""
record = {
"query": "H6E8I1T01BFUH9",
"alignments": [
{
"length": 2885,
"hsps": [
{
"sbjct_end": 2506,
"expect": 1.25854e-43,
"sbjct": "AATCCAGGGAATGAATAAAATAATCATTAGCAGTAACAA",
"sbjct_start": 2607,
"query": "AATCCAGGGAATAAA-TAATCATTAGCAGTAACAA",
"frame": [1, -1],
"query_end": 462,
"bits": 182.092,
"query_start": 362
}
],
"title": "Merkel1"
},
{
"length": 2220,
"hsps": [
{
"sbjct_end": 1841,
"expect": 1.25854e-43,
"sbjct": "AATCCAGGGAATCTAATAAAATAATCAA",
"sbjct_start": 1942,
"query": "AATCCAGGGAATCTTAAA-TAATCATTAGCAGTAACAA",
"frame": [1, -1],
"query_end": 462,
"bits": 180,
"query_start": 362
}
],
"title":"Merkel2"
}
]
}
mockOpener = mockOpen(read_data=dumps(PARAMS) + '\n' +
dumps(record) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('H6E8I1T01BFUH9', 'A' * 500))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(maxAlignmentsPerRead=1))
self.assertEqual(0, len(result))
def testScoreCutoffRemovesEntireAlignment_Bits(self):
"""
If the L{BlastReadsAlignments} filter function is supposed to filter on
a scoreCutoff (bit score) and the cut-off value results in an
alignment with no HSPs, then the alignment must be removed entirely.
"""
record = {
"query": "H6E8I1T01BFUH9",
"alignments": [
{
"length": 2885,
"hsps": [
{
"sbjct_end": 2506,
"expect": 1.25854e-10,
"sbjct": "AATCCAGGGAATGAATAAAATAATCATTAGCAGTAACAA",
"sbjct_start": 2607,
"query": "AATCCAGGGAATAAA-TAATCATTAGCAGTAACAA",
"frame": [1, -1],
"query_end": 462,
"bits": 150,
"query_start": 362
}
],
"title": "Merkel1"
},
{
"length": 2220,
"hsps": [
{
"sbjct_end": 1841,
"expect": 1.25854e-43,
"sbjct": "AATCCAGGGAATCTAATAAAATAATCAA",
"sbjct_start": 1942,
"query": "AATCCAGGGAATCTTAAA-TAATCATTAGCAGTAACAA",
"frame": [1, -1],
"query_end": 462,
"bits": 180,
"query_start": 362
}
],
"title": "Merkel2"
}
]
}
mockOpener = mockOpen(read_data=dumps(PARAMS) + '\n' +
dumps(record) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('H6E8I1T01BFUH9', 'A' * 500))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(scoreCutoff=160))
self.assertEqual(1, len(result))
self.assertEqual(1, len(result[0]))
self.assertEqual('Merkel2', result[0][0].subjectTitle)
def testScoreCutoffRemovesEntireAlignment_EValue(self):
"""
If the L{BlastReadsAlignments} filter function is supposed to filter on
        a scoreCutoff (e-value) and the cut-off value results in an
alignment with no HSPs, then the alignment must be removed entirely.
"""
record = {
"query": "H6E8I1T01BFUH9",
"alignments": [
{
"length": 2885,
"hsps": [
{
"sbjct_end": 2506,
"expect": 1.25e-10,
"sbjct": "AATCCAGGGAATGAATAAAATAATCATTAGCAGTAACAA",
"sbjct_start": 2607,
"query": "AATCCAGGGAATAAA-TAATCATTAGCAGTAACAA",
"frame": [1, -1],
"query_end": 462,
"bits": 150,
"query_start": 362
}
],
"title": "Merkel1"
},
{
"length": 2220,
"hsps": [
{
"sbjct_end": 1841,
"expect": 1.25e-30,
"sbjct": "AATCCAGGGAATCTAATAAAATAATCAA",
"sbjct_start": 1942,
"query": "AATCCAGGGAATCTTAAA-TAATCATTAGCAGTAACAA",
"frame": [1, -1],
"query_end": 462,
"bits": 180,
"query_start": 362
}
],
"title": "Merkel2"
}
]
}
mockOpener = mockOpen(read_data=dumps(PARAMS) + '\n' +
dumps(record) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('H6E8I1T01BFUH9', 'A' * 500))
readsAlignments = BlastReadsAlignments(
reads, 'file.json', scoreClass=LowerIsBetterScore)
result = list(readsAlignments.filter(scoreCutoff=1e-20))
self.assertEqual(1, len(result))
self.assertEqual(1, len(result[0]))
self.assertEqual('Merkel2', result[0][0].subjectTitle)
def testScoreCutoffRemovesHsps_Bits(self):
"""
        If the L{BlastReadsAlignments} filter function is supposed to filter on
scoreCutoff (bit score) and the cut-off value results in some HSPs
being invalid, then those HSPs must be removed entirely.
"""
record = {
"query": "H6E8I1T01BFUH9",
"alignments": [
{
"length": 2885,
"hsps": [
{
"sbjct_end": 2506,
"expect": 1.25854e-10,
"sbjct": "AATCCAGGGAATGAATAAAATAATCATTAGCAGTAACAA",
"sbjct_start": 2607,
"query": "AATCCAGGGAATAAA-TAATCATTAGCAGTAACAA",
"frame": [1, -1],
"query_end": 462,
"bits": 150,
"query_start": 362
},
{
"sbjct_end": 2506,
"expect": 1.25e-20,
"sbjct": "AATCCAGGGAATGAATAAAATAATCATTAGCAGTAACAA",
"sbjct_start": 2607,
"query": "AATCCAGGGAATAAA-TAATCATTAGCAGTAACAA",
"frame": [1, -1],
"query_end": 462,
"bits": 170,
"query_start": 362
}
],
"title": "Merkel1"
},
{
"length": 2220,
"hsps": [
{
"sbjct_end": 1841,
"expect": 1.25e-43,
"sbjct": "AATCCAGGGAATCTAATAAAATAATCAA",
"sbjct_start": 1942,
"query": "AATCCAGGGAATCTTAAA-TAATCATTAGCAGTAACAA",
"frame": [1, -1],
"query_end": 462,
"bits": 180,
"query_start": 362
}
],
"title": "Merkel2"
}
]
}
mockOpener = mockOpen(read_data=dumps(PARAMS) + '\n' +
dumps(record) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('H6E8I1T01BFUH9', 'A' * 500))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(scoreCutoff=160))
# There should only be one HSP left in the alignments for the
# first read, and it should have the right score (bit score).
self.assertEqual(1, len(result[0][0].hsps))
self.assertEqual(HSP(170), result[0][0].hsps[0])
# The second alignment should also be present.
self.assertEqual(1, len(result[0][1].hsps))
self.assertEqual(HSP(180), result[0][1].hsps[0])
def testScoreCutoffRemovesHsps_EValue(self):
"""
        If the L{BlastReadsAlignments} filter function is supposed to filter on
        scoreCutoff (e-value) and the cut-off value results in some HSPs
being invalid, then those HSPs must be removed entirely.
"""
record = {
"query": "H6E8I1T01BFUH9",
"alignments": [
{
"length": 2885,
"hsps": [
{
"sbjct_end": 2506,
"expect": 1.25e-10,
"sbjct": "AATCCAGGGAATGAATAAAATAATCATTAGCAGTAACAA",
"sbjct_start": 2607,
"query": "AATCCAGGGAATAAA-TAATCATTAGCAGTAACAA",
"frame": [1, -1],
"query_end": 462,
"bits": 150,
"query_start": 362
},
{
"sbjct_end": 2506,
"expect": 1.25e-20,
"sbjct": "AATCCAGGGAATGAATAAAATAATCATTAGCAGTAACAA",
"sbjct_start": 2607,
"query": "AATCCAGGGAATAAA-TAATCATTAGCAGTAACAA",
"frame": [1, -1],
"query_end": 462,
"bits": 170,
"query_start": 362
}
],
"title": "Merkel1"
},
{
"length": 2220,
"hsps": [
{
"sbjct_end": 1841,
"expect": 1.25e-30,
"sbjct": "AATCCAGGGAATCTAATAAAATAATCAA",
"sbjct_start": 1942,
"query": "AATCCAGGGAATCTTAAA-TAATCATTAGCAGTAACAA",
"frame": [1, -1],
"query_end": 462,
"bits": 180,
"query_start": 362
}
],
"title": "Merkel2"
}
]
}
mockOpener = mockOpen(read_data=dumps(PARAMS) + '\n' +
dumps(record) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('H6E8I1T01BFUH9', 'A' * 500))
readsAlignments = BlastReadsAlignments(
reads, 'file.json', scoreClass=LowerIsBetterScore)
result = list(readsAlignments.filter(scoreCutoff=1e-15))
# There should only be one HSP left in the alignments for the
# first read, and it should have the right score (e-value).
self.assertEqual(1, len(result[0][0].hsps))
self.assertEqual(LSP(1.25e-20), result[0][0].hsps[0])
# The second alignment should also be present.
self.assertEqual(1, len(result[0][1].hsps))
self.assertEqual(LSP(1.25e-30), result[0][1].hsps[0])
def testTitleByRegexCaseInvariant(self):
"""
Filtering with a title regex must work independent of case.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(titleRegex='sqUIRRel'))
self.assertEqual(1, len(result))
self.assertEqual('id0', result[0].read.id)
self.assertEqual('gi|887699|gb|DQ37780 Squirrelpox virus 1296/99',
result[0][0].subjectTitle)
def testTitleByRegexAllAlignments(self):
"""
Filtering with a title regex must work in the case that all alignments
for a hit match the regex.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(titleRegex='squirrel'))
self.assertEqual(1, len(result))
self.assertEqual('id0', result[0].read.id)
self.assertEqual('gi|887699|gb|DQ37780 Squirrelpox virus 1296/99',
result[0][0].subjectTitle)
def testTitleByRegexOneAlignments(self):
"""
Filtering with a title regex must work in the case that only some
alignments for a hit match the regex.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(titleRegex='Mummy'))
self.assertEqual(1, len(result))
self.assertEqual('id1', result[0].read.id)
self.assertEqual('gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.',
result[0][0].subjectTitle)
def testTitleByNegativeRegexOneAlignment(self):
"""
Filtering with a negative title regex must work in the case that only
some alignments for a hit are ruled out (in which case only those
alignments must be removed but the hit is still valid).
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(negativeTitleRegex='Mummy'))
self.assertEqual(3, len(result))
self.assertEqual('id1', result[1].read.id)
self.assertEqual(1, len(result[1]))
self.assertEqual('gi|887699|gb|DQ37780 Monkeypox virus 456',
result[1][0].subjectTitle)
def testTitleByNegativeRegexMatchesAll(self):
"""
Filtering with a negative title regex that matches all alignments
must remove everything and return an empty result.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(negativeTitleRegex='pox'))
self.assertEqual(0, len(result))
def testTitleByNegativeRegexMatchingAllWithWhitelist(self):
"""
Filtering with a negative title regex that matches all alignments
must remove everything and result in no hits, except for any
whitelisted titles.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
title = 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99'
result = list(readsAlignments.filter(negativeTitleRegex='pox',
whitelist=[title]))
self.assertEqual(1, len(result))
self.assertEqual('id0', result[0].read.id)
self.assertEqual(1, len(result[0]))
self.assertEqual(title, result[0][0].subjectTitle)
def testTitleByRegexMatchingAllWithBlacklist(self):
"""
Filtering with a title regex that matches all alignments
must keep everything, except for any blacklisted titles.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
blacklist = ['gi|887699|gb|DQ37780 Squirrelpox virus 1296/99',
'gi|887699|gb|DQ37780 Squirrelpox virus 55']
result = list(readsAlignments.filter(titleRegex='pox',
blacklist=blacklist))
self.assertEqual(2, len(result))
self.assertEqual('id1', result[0].read.id)
self.assertEqual('id2', result[1].read.id)
def testTitleTruncation(self):
"""
When truncating titles, if a set of matched sequences has titles that
are identical up to the truncation word, only the first found is
returned.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = readsAlignments.filter(truncateTitlesAfter='virus')
result = list(result)
self.assertEqual(3, len(result))
self.assertEqual('id0', result[0].read.id)
self.assertEqual(1, len(result[0]))
# The Squirrelpox virus 55 hit in RECORD0 is not returned.
self.assertEqual('gi|887699|gb|DQ37780 Squirrelpox virus 1296/99',
result[0][0].subjectTitle)
def testMinTitleSequenceLength(self):
"""
It must be possible to filter alignments based on minimum hit sequence
length.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(minSequenceLen=37500))
self.assertEqual(1, len(result))
self.assertEqual('id0', result[0].read.id)
self.assertEqual(1, len(result[0]))
self.assertEqual('gi|887699|gb|DQ37780 Squirrelpox virus 55',
result[0][0].subjectTitle)
def testMinTitleSequenceLengthNoHits(self):
"""
It must be possible to filter alignments based on minimum hit sequence
length and if nothing sufficiently long matches, an empty list of
alignments must be returned.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(minSequenceLen=1000000))
self.assertEqual(0, len(result))
def testMaxTitleSequenceLength(self):
"""
It must be possible to filter alignments based on maximum hit sequence
length.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(maxSequenceLen=31000))
self.assertEqual(1, len(result))
self.assertEqual('id2', result[0].read.id)
self.assertEqual(1, len(result[0]))
self.assertEqual('gi|887699|gb|DQ37780 Cowpox virus 15',
result[0][0].subjectTitle)
def testMaxTitleSequenceLengthNoHits(self):
"""
It must be possible to filter alignments based on maximum hit sequence
length and if no sufficiently short sequences match, an empty
list of alignments must be returned.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(maxSequenceLen=10000))
self.assertEqual(0, len(result))
def testMinAndMaxTitleSequenceLength(self):
"""
It must be possible to filter alignments simultaneously on minimum and
maximum hit sequence length.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(minSequenceLen=37000,
maxSequenceLen=38000))
self.assertEqual(1, len(result))
self.assertEqual('id0', result[0].read.id)
self.assertEqual(2, len(result[0]))
self.assertEqual('gi|887699|gb|DQ37780 Squirrelpox virus 1296/99',
result[0][0].subjectTitle)
self.assertEqual('gi|887699|gb|DQ37780 Squirrelpox virus 55',
result[0][1].subjectTitle)
def testMinStart(self):
"""
It must be possible to filter alignments based on minimum offset in
the hit sequence.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(minStart=15300))
self.assertEqual(1, len(result))
self.assertEqual('id0', result[0].read.id)
self.assertEqual(1, len(result[0]))
self.assertEqual('gi|887699|gb|DQ37780 Squirrelpox virus 1296/99',
result[0][0].subjectTitle)
def testMinStartNoHits(self):
"""
It must be possible to filter alignments based on minimum offset in
the hit sequence, and if no hsps match then an empty result set
must be returned.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(minStart=100000))
self.assertEqual(0, len(result))
def testMaxStop(self):
"""
It must be possible to filter alignments based on maximum offset in
the hit sequence.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(maxStop=1500))
self.assertEqual(1, len(result))
self.assertEqual('id2', result[0].read.id)
self.assertEqual(1, len(result[0]))
self.assertEqual('gi|887699|gb|DQ37780 Cowpox virus 15',
result[0][0].subjectTitle)
def testMaxStopNoHits(self):
"""
It must be possible to filter alignments based on maximum offset in
the hit sequence, and if no hsps match then an empty result set must
be returned.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(maxStop=100))
self.assertEqual(0, len(result))
def testMinStartAndMaxstop(self):
"""
It must be possible to filter alignments based simultaneously on
minimum and maximum offset in the hit sequence.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(minStart=9000, maxStop=12000))
self.assertEqual(1, len(result))
self.assertEqual('id1', result[0].read.id)
self.assertEqual(2, len(result[0]))
def testRepeatedFilter_MinStartThenMinStart(self):
"""
It must be possible to filter alignments multiple times using the same
filter parameters.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json').filter(
minStart=9000).filter(minStart=9000)
result = list(readsAlignments)
self.assertEqual(2, len(result))
self.assertEqual('id0', result[0].read.id)
self.assertEqual('id1', result[1].read.id)
def testRepeatedFilter_MinStartThenMaxstop(self):
"""
It must be possible to filter alignments multiple times using different
filter parameters.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json').filter(
minStart=9000).filter(maxStop=12000)
result = list(readsAlignments)
self.assertEqual(1, len(result))
self.assertEqual('id1', result[0].read.id)
self.assertEqual(2, len(result[0]))
def testReadIdNoMatches(self):
"""
When filtering alignments based on a regex for
read ids that matches no ids, an empty generator must be returned.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(readIdRegex='blah'))
self.assertEqual(0, len(result))
def testReadId(self):
"""
It must be possible to filter alignments based on a regex for
read ids.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(readIdRegex='id[12]'))
self.assertEqual(2, len(result))
self.assertEqual('id1', result[0].read.id)
self.assertEqual('id2', result[1].read.id)
def testReadIdAnchored(self):
"""
It must be possible to filter alignments based on a regex for
read ids that is anchored at start and end.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(readIdRegex='^id0$'))
self.assertEqual(1, len(result))
self.assertEqual('id0', result[0].read.id)
def testReadIdCaseSensitive(self):
"""
Filtering alignments based on a regex for read ids must be case
sensitive.
"""
mockOpener = mockOpen(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
result = list(readsAlignments.filter(readIdRegex='^ID0$'))
self.assertEqual(0, len(result))
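# Hypothetical usage sketch, not part of the original test suite: the chained
# filter tests above show that filter() returns a filtered readsAlignments
# object which can itself be filtered again and iterated, so offset and
# read-id criteria can be stacked lazily.
def _example_chained_filtering(readsAlignments):
    wanted = readsAlignments.filter(minStart=9000).filter(
        maxStop=12000).filter(readIdRegex='^id1$')
    return [readAlignments.read.id for readAlignments in wanted]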
|
bamueh/dark-matter
|
test/blast/test_alignments.py
|
Python
|
mit
| 68,467
|
[
"BLAST"
] |
de18fdd4d56760b4bed594a964b4f1d5d97a5e419e041870b5d9ceac42bac445
|
import numpy
import sys
import time
from mceditlib.selection import BoundingBox
from mceditlib.worldeditor import WorldEditor
from mceditlib.test import templevel
from mceditlib import relight
def do_copy(dim, station, relight):
times = 1
boxes = []
for x in range(times):
for z in range(times):
origin = (x * station.bounds.width, 54, z * station.bounds.length)
boxes.append(BoundingBox(origin, station.bounds.size))
dim.copyBlocks(station, station.bounds, origin, create=True, updateLights=relight)
return reduce(lambda a, b: a.union(b), boxes)
def manmade_relight(test):
world = templevel.TempLevel("AnvilWorld")
dim = world.getDimension()
stationEditor = WorldEditor("test_files/station.schematic")
station = stationEditor.getDimension()
startCopy = time.time()
box = do_copy(dim, station, False)
copyTime = time.time() - startCopy
print("Copy took %f seconds. Reducing relight-in-copyBlocks times by this much." % copyTime)
positions = []
for cx, cz in box.chunkPositions():
for cy in box.sectionPositions(cx, cz):
positions.append((cx, cy, cz))
assert len(positions) > box.chunkCount
if test == "post" or test == "all":
def postCopy(): # profiling
start = time.time()
count = 0
print("Relighting outside of copyBlocks. Updating %d cells" % (len(positions) * 16 * 16 * 16))
for cx, cy, cz in positions:
indices = numpy.indices((16, 16, 16), numpy.int32)
indices.shape = 3, 16*16*16
indices += ([cx << 4], [cy << 4], [cz << 4])
x, y, z = indices
relight.updateLightsByCoord(dim, x, y, z)
count += 1
t = time.time() - start
print "Relight manmade building (outside copyBlocks): " \
"%d (out of %d) chunk-sections in %.02f seconds (%f sections per second; %dms per section)" \
% (count, len(positions), t, count / t, 1000 * t / count)
postCopy()
if test == "smart" or test == "all":
def allSections():
world = templevel.TempLevel("AnvilWorld")
dim = world.getDimension()
start = time.time()
do_copy(dim, station, "all")
t = time.time() - start - copyTime
print "Relight manmade building (in copyBlocks, all sections): " \
"%d chunk-sections in %.02f seconds (%f sections per second; %dms per section)" \
% (len(positions), t, len(positions) / t, 1000 * t / len(positions))
allSections()
if test == "section" or test == "all":
def perSection():
world = templevel.TempLevel("AnvilWorld")
dim = world.getDimension()
start = time.time()
do_copy(dim, station, "section")
t = time.time() - start - copyTime
print "Relight manmade building (in copyBlocks, for each section): " \
"%d chunk-sections in %.02f seconds (%f sections per second; %dms per section)" \
% (len(positions), t, len(positions) / t, 1000 * t / len(positions))
perSection()
if __name__ == '__main__':
if len(sys.argv) > 1:
method = sys.argv[1]
print "Using method", method
relight.setMethod(method)
if len(sys.argv) > 2:
test = sys.argv[2]
else:
test = "all"
manmade_relight(test)
"""
Conclusion:
Much time is spent in the "post" method which updates all cells in the selection box, calling
updateLights on cells whose opacity values did not change. This is evidenced by the time spent in
"drawLights", which must be called because updateLights doesn't know the previous block type in
that cell.
copyBlocksFrom has been modified to find the cells whose lighting or opacity value did change,
and to pass only those cells to updateLights. This is more than twice as fast, and updating
all changed cells at once is even faster, presumably because changes to following chunks will
invalidate lighting data computed by previous chunks.
Because updateLights does not know what the previous cell's opacity values were (it does know the
cell's current light value, so it can skip spreadLight if the new brightness didn't exceed that),
clients of updateLights should take care to find only cells whose opacity values changed.
copyBlocksFrom stores all changed cell positions, which could lead to MemoryErrors for very large
copies. Instead of storing all positions, it should periodically call updateLights whenever the
position list exceeds a threshold. This "batch-update" method should be an acceptable compromise
between updating for each section (suffering invalidation costs), and updating all sections
at once after the copy (risking MemoryErrors and possibly paying additional chunk loading costs).
Updating lights for chunks whose neighbors have not been copied yet will cause wasted effort.
It helps to describe this graphically. This is the current visitation order:
(area is 24x12, and 34 chunks have been copied so far)
************************
**********..............
........................
........................
........................
........................
........................
........................
........................
........................
........................
........................
'.' represents chunks that are yet to be copied.
'*' represents chunks that have been copied.
If a batched lighting update is called at this point, these are the chunks that, when they are
copied over later, will invalidate parts of the previous update:
************************
**********--------------
----------+.............
........................
........................
........................
........................
........................
........................
........................
........................
........................
'-' represents chunks that when edited will invalidate the previous lighting update applied
to the '*' chunks. There are 24 such chunks.
'+' represents chunks that when edited will invalidate at most half of a previous chunk's
update.
So let's say 24.5 chunks are invalidated later. Out of 34 chunks, that is not very good at all.
That number is roughly proportional to the width of the selection box.
The current visitation order is thus:
1234567890abcdefghijklmn
opqrstuvwx--------------
----------+.............
........................
........................
........................
........................
........................
........................
........................
........................
........................
A possibly improved visitation order:
12efghuvwx-.............
43dcjits--+.............
589bknor-...............
670almpq-...............
--------+...............
........................
........................
........................
........................
........................
........................
........................
13 full chunks and two half-chunks are invalidated, for a total of 15 chunks out of 34.
At least it's less than half.
This number is roughly proportional to the square root of the number of chunks copied so far.
The order of chunks visited by copyBlocksFrom is linear. When it calls updateLights for a chunk,
the chunks adjacent to that chunk (and ahead of it in the order) will force part of that chunk's
lighting to be redone when they are copied. To minimize wasted effort, a chunk order that
resembles a space-filling curve, such as a Hilbert curve, may be applicable. The goal is to
reduce the number of chunks that have neighbors yet to be copied at the time the batched update
is performed.
Maybe we can do better. What if, instead of batch-updating ALL of the chunks copied so far,
we only batch-update the ones we know won't be invalidated later?
The cells that need update are currently just tossed in a list. Instead, associate them with
their chunk position. Keep track of which chunks we have copied, and how many of their
eight neighbors have already been copied too. Only issue a batch update for chunks where all eight
neighbors are copied. If we use the original visitation order, then for very large copies, we may
reach the threshold before any neighbors have been copied. The new visitation order would avoid
this as, for most chunks, it will visit all of a chunk's neighbors very soon after that chunk.
In fact, it may not be necessary to batch-update at all if we can update a chunk as soon as all its
neighbors are ready.
Output:
Using method cython
INFO:mceditlib.block_copy:Copying 3103771 blocks from BoundingBox(origin=Vector(0, 0, 0), size=Vector(113, 121, 227)) to (0, 54, 0)
INFO:mceditlib.block_copy:Copying: Chunk 20/120...
INFO:mceditlib.block_copy:Copying: Chunk 40/120...
INFO:mceditlib.block_copy:Copying: Chunk 60/120...
INFO:mceditlib.block_copy:Copying: Chunk 80/120...
INFO:mceditlib.block_copy:Copying: Chunk 100/120...
INFO:mceditlib.block_copy:Copying: Chunk 120/120...
INFO:mceditlib.block_copy:Duration: 1.292s, 120/120 chunks, 10.77ms per chunk (92.88 chunks per second)
INFO:mceditlib.block_copy:Copied 0/0 entities and 293/293 tile entities
Copy took 1.292000 seconds. Reducing relight-in-copyBlocks times by this much.
Relighting outside of copyBlocks. Updating 3932160 cells
Relight manmade building (outside copyBlocks): 960 (out of 960) chunk-sections in 71.49 seconds (13.428639 sections per second; 74ms per section)
INFO:mceditlib.block_copy:Copying 3103771 blocks from BoundingBox(origin=Vector(0, 0, 0), size=Vector(113, 121, 227)) to (0, 54, 0)
INFO:mceditlib.block_copy:Copying: Chunk 20/120...
INFO:mceditlib.block_copy:Copying: Chunk 40/120...
INFO:mceditlib.block_copy:Copying: Chunk 60/120...
INFO:mceditlib.block_copy:Copying: Chunk 80/120...
INFO:mceditlib.block_copy:Copying: Chunk 100/120...
INFO:mceditlib.block_copy:Copying: Chunk 120/120...
INFO:mceditlib.block_copy:Duration: 1.318s, 120/120 chunks, 10.98ms per chunk (91.05 chunks per second)
INFO:mceditlib.block_copy:Copied 0/0 entities and 293/293 tile entities
INFO:mceditlib.block_copy:Updating all at once for 969 sections (646338 cells)
INFO:mceditlib.block_copy:Lighting complete.
INFO:mceditlib.block_copy:Duration: 16.979s, 968 sections, 17.54ms per section (57.01 sections per second)
Relight manmade building (in copyBlocks, all sections): 960 chunk-sections in 17.01 seconds (56.444027 sections per second; 17ms per section)
INFO:mceditlib.block_copy:Copying 3103771 blocks from BoundingBox(origin=Vector(0, 0, 0), size=Vector(113, 121, 227)) to (0, 54, 0)
INFO:mceditlib.block_copy:Copying: Chunk 20/120...
INFO:mceditlib.block_copy:Copying: Chunk 40/120...
INFO:mceditlib.block_copy:Copying: Chunk 60/120...
INFO:mceditlib.block_copy:Copying: Chunk 80/120...
INFO:mceditlib.block_copy:Copying: Chunk 100/120...
INFO:mceditlib.block_copy:Copying: Chunk 120/120...
Relight manmade building (in copyBlocks, for each section): 960 chunk-sections in 26.12 seconds (36.757667 sections per second; 27ms per section)
INFO:mceditlib.block_copy:Duration: 27.408s, 120/120 chunks, 228.40ms per chunk (4.38 chunks per second)
INFO:mceditlib.block_copy:Copied 0/0 entities and 293/293 tile entities
"""
|
vorburger/mcedit2
|
src/mceditlib/bench/time_relight_manmade.py
|
Python
|
bsd-3-clause
| 11,374
|
[
"VisIt"
] |
b9146850b83196eb224704a94ad09354c45fe88d7a5eaf5249bd2f2a389f5816
|
import os, sys, re
import distutils.dir_util
class CreateApp:
__args = {}
__path = ""
__cwd = ""
__scriptPath = ""
def __init__(self, args, scriptPath):
self.__args = args.command
self.__cwd = os.getcwd()
self.__scriptPath = scriptPath
def validate(self):
if len(self.__args) <= 1:
print "You must provide a name for your new application (command: 'create-app')"
return False
self.__path = self.__cwd + "/" + self.__args[1]
return True
def run(self):
print "New application name: %s\n" % self.__args[1]
self.__createDirectory()
self.__copyFiles()
print "Your new application '%s' is ready. To begin change directory to %s and type 'blast run-app'." % (self.__args[1], self.__args[1])
def __createDirectory(self):
print "Digging up a directory..."
if not os.path.exists(self.__path):
os.makedirs(self.__path)
def __copyFiles(self):
print "Carefully placing files..."
distutils.dir_util.copy_tree(self.__scriptPath + "/skeleton/framework", self.__path, preserve_symlinks = 1)
self.__setupProject()
distutils.dir_util.copy_tree(self.__scriptPath + "/skeleton/cf-engine", self.__path, preserve_symlinks = 1)
def __setupProject(self):
for root, subfolders, files in os.walk(self.__path):
for file in files:
fp = open(os.path.join(root, file), "rU")
raw = fp.read()
fp.close()
raw = self.__fixFile(raw)
fp = open(os.path.join(root, file), "w")
fp.write(raw)
fp.close()
def __fixFile(self, contents):
fixes = [
{ "pattern": re.compile('\$\{appName\}', re.I | re.M), "replacement": self.__args[1] }
]
result = contents
for p in fixes:
result = p["pattern"].sub(p["replacement"], result)
return result
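# Hypothetical illustration, not part of the original command: the
# __fixFile() step above simply substitutes every ${appName} placeholder in
# the copied skeleton files with the new application name.
def _fix_file_example(app_name):
    pattern = re.compile(r'\$\{appName\}', re.I | re.M)
    skeleton = "Welcome to ${appName}! Run 'blast run-app' inside ${appName}."
    return pattern.sub(app_name, skeleton)
    # _fix_file_example("myblog")
    # -> "Welcome to myblog! Run 'blast run-app' inside myblog."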
|
adampresley/Blast-for-Basis
|
blast/commands/CreateApp.py
|
Python
|
bsd-2-clause
| 1,724
|
[
"BLAST"
] |
b5800b8606e370303ffc7e87178db1bb7099cac21aa958c1bde2a4e2abb64f52
|
# -*- coding: utf-8 -*-
"""Unit tests for functions calling combinations of tools externally."""
from pathlib2 import Path
import pytest
from imfusion.external import compound
class TestSortBam(object):
"""Unit tests for the sambamba_sort function."""
def test_sambamba_call(self, mocker):
"""Tests call that uses sambamba."""
mocker.patch.object(compound, 'which', return_value='sambamba')
mock_sambamba = mocker.patch.object(compound, 'sambamba_sort')
compound.sort_bam(Path('test.bam'), Path('sorted.bam'))
mock_sambamba.assert_called_once_with(
Path('test.bam'), Path('sorted.bam'), threads=1)
def test_pysam_call(self, mocker):
"""Tests call that uses pysam (samtools)."""
mocker.patch.object(compound, 'which', return_value=None)
mock_pysam = mocker.patch.object(compound.pysam, 'sort')
compound.sort_bam(Path('test.bam'), Path('sorted.bam'))
mock_pysam.assert_called_once_with('test.bam', 'sorted.bam')
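# Hypothetical sketch, not the real imfusion implementation: the two tests
# above encode a dispatch rule for sort_bam -- prefer sambamba when it is on
# the PATH, otherwise fall back to pysam's samtools sort. The names used here
# mirror the mocked attributes (compound.which, compound.sambamba_sort,
# compound.pysam); everything beyond that is an assumption.
def _sort_bam_sketch(input_path, output_path, threads=1):
    if compound.which('sambamba') is not None:
        compound.sambamba_sort(input_path, output_path, threads=threads)
    else:
        compound.pysam.sort(str(input_path), str(output_path))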
|
jrderuiter/im-fusion
|
tests/imfusion/external/test_ext_compound.py
|
Python
|
mit
| 1,027
|
[
"pysam"
] |
3849e06d28880fdf4abd556dd707d06344ec23d1ee7715133341faba6fb5de83
|
import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
"""
Gaussian Naive Bayes classification.
This checks that GaussianNB implements fit and predict and returns
correct values for a simple toy dataset.
"""
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
def test_gnb_prior():
"""Test whether class priors are properly set. """
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_discrete_prior():
"""Test whether class priors are properly set. """
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
"""
Multinomial Naive Bayes classification.
This checks that MultinomialNB implements fit and predict and returns
correct values for a simple toy dataset.
"""
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
def test_discretenb_pickle():
"""Test picklability of discrete naive Bayes classifiers"""
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
def test_input_check():
"""Test input checks"""
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls()
assert_raises(ValueError, clf.fit, X2, y2[:-1])
def test_discretenb_predict_proba():
"""Test discrete NB classes' probability scores"""
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1]), 2)
assert_equal(clf.predict_proba(X[0]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1)
assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
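def test_bernoulli_vs_multinomial_counts():
    # Hypothetical illustration, not part of the original test suite,
    # expanding on the comment above about the 100s: BernoulliNB binarizes
    # features (any count > 0 becomes 1), so its per-class feature counts are
    # bounded by the class sample counts, while MultinomialNB accumulates the
    # raw counts and the 100s survive.
    X_demo = np.array([[1, 100, 0], [0, 1, 0], [0, 100, 1]])
    y_demo = [0, 0, 2]
    bnb = BernoulliNB().fit(X_demo, y_demo)
    mnb = MultinomialNB().fit(X_demo, y_demo)
    assert_equal(bnb.feature_count_.max(), 2)    # at most 2 samples per class
    assert_equal(mnb.feature_count_.max(), 101)  # raw counts: 100 + 1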
def test_discretenb_uniform_prior():
"""Test whether discrete NB classes fit a uniform prior
when fit_prior=False and class_prior=None"""
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
"""Test whether discrete NB classes use provided prior"""
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_sample_weight():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([1, 0]), [1])
positive_prior = np.exp(clf.intercept_)
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
|
florian-f/sklearn
|
sklearn/tests/test_naive_bayes.py
|
Python
|
bsd-3-clause
| 5,716
|
[
"Gaussian"
] |
f72ee16dfc0a97ee6c73c698dbda1bb0a8c59b58dc8b443b5fa8b5f372cc95cb
|
from datetime import datetime
import re
import traceback
import warnings
import numpy as np
import pandas as pd
from collections import defaultdict
from pandas.tslib import OutOfBoundsDatetime
from .core import indexing, ops, utils
from .core.formatting import format_timestamp, first_n_items
from .core.variable import as_variable, Variable
from .core.pycompat import iteritems, OrderedDict, PY3, basestring
# standard calendars recognized by netcdftime
_STANDARD_CALENDARS = set(['standard', 'gregorian', 'proleptic_gregorian'])
def mask_and_scale(array, fill_value=None, scale_factor=None, add_offset=None,
dtype=float):
"""Scale and mask array values according to CF conventions for packed and
missing values
First, values equal to the fill_value are replaced by NaN. Then, new values
are given by the formula:
original_values * scale_factor + add_offset
Parameters
----------
array : array-like
Original array of values to wrap
fill_value : number, optional
All values equal to fill_value in the original array are replaced
by NaN. If an array of multiple values is provided, a warning will be
issued and all array elements matching a value in the fill_value array
will be replaced by NaN.
scale_factor : number, optional
Multiply entries in the original array by this number.
add_offset : number, optional
After applying scale_factor, add this number to entries in the
original array.
Returns
-------
scaled : np.ndarray
Array of masked and scaled values.
References
----------
http://www.unidata.ucar.edu/software/netcdf/docs/BestPractices.html
"""
# by default, cast to float to ensure NaN is meaningful
values = np.array(array, dtype=dtype, copy=True)
if fill_value is not None and not np.all(pd.isnull(fill_value)):
if getattr(fill_value, 'size', 1) > 1:
fill_values = fill_value # multiple fill values
else:
fill_values = [fill_value]
for f_value in fill_values:
if values.ndim > 0:
values[values == f_value] = np.nan
elif values == f_value:
values = np.array(np.nan)
if scale_factor is not None:
values *= scale_factor
if add_offset is not None:
values += add_offset
return values
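def _mask_and_scale_example():
    # Hypothetical illustration, not part of the original module: decode a
    # packed int16 array with a -999 fill value using the formula documented
    # above (original_values * scale_factor + add_offset).
    packed = np.array([-999, 0, 10, 20], dtype=np.int16)
    return mask_and_scale(packed, fill_value=-999,
                          scale_factor=0.5, add_offset=273.15)
    # -> array([   nan,  273.15,  278.15,  283.15])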
def _netcdf_to_numpy_timeunit(units):
units = units.lower()
if not units.endswith('s'):
units = '%ss' % units
return {'microseconds': 'us', 'milliseconds': 'ms', 'seconds': 's',
'minutes': 'm', 'hours': 'h', 'days': 'D'}[units]
def _unpack_netcdf_time_units(units):
# CF datetime units follow the format: "UNIT since DATE"
# this parses out the unit and date allowing for extraneous
# whitespace.
matches = re.match('(.+) since (.+)', units)
if not matches:
raise ValueError('invalid time units: %s' % units)
delta_units, ref_date = [s.strip() for s in matches.groups()]
return delta_units, ref_date
def _decode_datetime_with_netcdf4(num_dates, units, calendar):
import netCDF4 as nc4
dates = np.asarray(nc4.num2date(num_dates, units, calendar))
if (dates[np.nanargmin(num_dates)].year < 1678 or
dates[np.nanargmax(num_dates)].year >= 2262):
warnings.warn('Unable to decode time axis into full '
'numpy.datetime64 objects, continuing using dummy '
'netCDF4.datetime objects instead, reason: dates out'
' of range', RuntimeWarning, stacklevel=3)
else:
try:
dates = nctime_to_nptime(dates)
except ValueError as e:
warnings.warn('Unable to decode time axis into full '
'numpy.datetime64 objects, continuing using '
'dummy netCDF4.datetime objects instead, reason:'
'{0}'.format(e), RuntimeWarning, stacklevel=3)
return dates
def decode_cf_datetime(num_dates, units, calendar=None):
"""Given an array of numeric dates in netCDF format, convert it into a
numpy array of date time objects.
For standard (Gregorian) calendars, this function uses vectorized
operations, which makes it much faster than netCDF4.num2date. In such a
case, the returned array will be of type np.datetime64.
See also
--------
netCDF4.num2date
"""
num_dates = np.asarray(num_dates, dtype=float)
flat_num_dates = num_dates.ravel()
if calendar is None:
calendar = 'standard'
delta, ref_date = _unpack_netcdf_time_units(units)
try:
if calendar not in _STANDARD_CALENDARS:
raise OutOfBoundsDatetime
delta = _netcdf_to_numpy_timeunit(delta)
try:
ref_date = pd.Timestamp(ref_date)
except ValueError:
# ValueError is raised by pd.Timestamp for non-ISO timestamp
# strings, in which case we fall back to using netCDF4
raise OutOfBoundsDatetime
dates = (pd.to_timedelta(flat_num_dates, delta) + ref_date).values
except (OutOfBoundsDatetime, OverflowError):
dates = _decode_datetime_with_netcdf4(flat_num_dates, units, calendar)
return dates.reshape(num_dates.shape)
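def _decode_cf_datetime_example():
    # Hypothetical illustration, not part of the original module: for a
    # standard calendar the decoding stays on the fast vectorized pandas path
    # and yields a datetime64[ns] array, e.g.
    #   decode_cf_datetime([0, 1, 2], 'days since 2000-01-01')
    # gives 2000-01-01, 2000-01-02 and 2000-01-03.
    return decode_cf_datetime([0, 1, 2], 'days since 2000-01-01')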
def decode_cf_timedelta(num_timedeltas, units):
"""Given an array of numeric timedeltas in netCDF format, convert it into a
numpy timedelta64[ns] array.
"""
num_timedeltas = np.asarray(num_timedeltas)
units = _netcdf_to_numpy_timeunit(units)
shape = num_timedeltas.shape
num_timedeltas = num_timedeltas.ravel()
result = pd.to_timedelta(num_timedeltas, unit=units, box=False)
# NaT is returned unboxed with wrong units; this should be fixed in pandas
if result.dtype != 'timedelta64[ns]':
result = result.astype('timedelta64[ns]')
return result.reshape(shape)
TIME_UNITS = frozenset(['days', 'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds'])
def _infer_time_units_from_diff(unique_timedeltas):
for time_unit, delta in [('days', 86400), ('hours', 3600),
('minutes', 60), ('seconds', 1)]:
unit_delta = np.timedelta64(10 ** 9 * delta, 'ns')
diffs = unique_timedeltas / unit_delta
if np.all(diffs == diffs.astype(int)):
return time_unit
return 'seconds'
def infer_datetime_units(dates):
"""Given an array of datetimes, returns a CF compatible time-unit string of
the form "{time_unit} since {date[0]}", where `time_unit` is 'days',
'hours', 'minutes' or 'seconds' (the first one that can evenly divide all
unique time deltas in `dates`)
"""
dates = pd.to_datetime(np.asarray(dates).ravel(), box=False)
dates = dates[pd.notnull(dates)]
unique_timedeltas = np.unique(np.diff(dates))
units = _infer_time_units_from_diff(unique_timedeltas)
reference_date = dates[0] if len(dates) > 0 else '1970-01-01'
return '%s since %s' % (units, pd.Timestamp(reference_date))
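def _infer_datetime_units_example():
    # Hypothetical illustration, not part of the original module: dates
    # spaced exactly one day apart are reported in days, the largest unit in
    # _infer_time_units_from_diff that evenly divides every unique delta.
    dates = pd.to_datetime(['2000-01-01', '2000-01-02', '2000-01-03'])
    return infer_datetime_units(dates)
    # -> 'days since 2000-01-01 00:00:00'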
def infer_timedelta_units(deltas):
"""Given an array of timedeltas, returns a CF compatible time-unit from
{'days', 'hours', 'minutes', 'seconds'} (the first one that can evenly
divide all unique time deltas in `deltas`)
"""
deltas = pd.to_timedelta(np.asarray(deltas).ravel(), box=False)
unique_timedeltas = np.unique(deltas[pd.notnull(deltas)])
units = _infer_time_units_from_diff(unique_timedeltas)
return units
def nctime_to_nptime(times):
"""Given an array of netCDF4.datetime objects, return an array of
numpy.datetime64 objects of the same size"""
times = np.asarray(times)
new = np.empty(times.shape, dtype='M8[ns]')
for i, t in np.ndenumerate(times):
dt = datetime(t.year, t.month, t.day, t.hour, t.minute, t.second)
new[i] = np.datetime64(dt)
return new
def _cleanup_netcdf_time_units(units):
delta, ref_date = _unpack_netcdf_time_units(units)
try:
units = '%s since %s' % (delta, format_timestamp(ref_date))
except OutOfBoundsDatetime:
# don't worry about reifying the units if they're out of bounds
pass
return units
def _encode_datetime_with_netcdf4(dates, units, calendar):
"""Fallback method for encoding dates using netCDF4-python.
This method is more flexible than xarray's parsing using datetime64[ns]
arrays but also slower because it loops over each element.
"""
import netCDF4 as nc4
if np.issubdtype(dates.dtype, np.datetime64):
# numpy's broken datetime conversion only works for us precision
dates = dates.astype('M8[us]').astype(datetime)
def encode_datetime(d):
return np.nan if d is None else nc4.date2num(d, units, calendar)
return np.vectorize(encode_datetime)(dates)
def cast_to_int_if_safe(num):
int_num = np.array(num, dtype=np.int64)
if (num == int_num).all():
num = int_num
return num
def encode_cf_datetime(dates, units=None, calendar=None):
"""Given an array of datetime objects, returns the tuple `(num, units,
calendar)` suitable for a CF compliant time variable.
Unlike `date2num`, this function can handle datetime64 arrays.
See also
--------
netCDF4.date2num
"""
dates = np.asarray(dates)
if units is None:
units = infer_datetime_units(dates)
else:
units = _cleanup_netcdf_time_units(units)
if calendar is None:
calendar = 'proleptic_gregorian'
delta, ref_date = _unpack_netcdf_time_units(units)
try:
if calendar not in _STANDARD_CALENDARS or dates.dtype.kind == 'O':
# parse with netCDF4 instead
raise OutOfBoundsDatetime
assert dates.dtype == 'datetime64[ns]'
delta_units = _netcdf_to_numpy_timeunit(delta)
time_delta = np.timedelta64(1, delta_units).astype('timedelta64[ns]')
ref_date = np.datetime64(pd.Timestamp(ref_date))
num = (dates - ref_date) / time_delta
except (OutOfBoundsDatetime, OverflowError):
num = _encode_datetime_with_netcdf4(dates, units, calendar)
num = cast_to_int_if_safe(num)
return (num, units, calendar)
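def _encode_cf_datetime_example():
    # Hypothetical illustration, not part of the original module: encoding is
    # the inverse of decode_cf_datetime, so dates one day apart encoded
    # against 'days since 2000-01-01' come back as the numbers 0 and 1.
    num, units, calendar = encode_cf_datetime(
        pd.to_datetime(['2000-01-01', '2000-01-02']),
        units='days since 2000-01-01')
    # num -> array([0, 1]); calendar defaults to 'proleptic_gregorian'
    return num, units, calendar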
def encode_cf_timedelta(timedeltas, units=None):
if units is None:
units = infer_timedelta_units(timedeltas)
np_unit = _netcdf_to_numpy_timeunit(units)
num = 1.0 * timedeltas / np.timedelta64(1, np_unit)
num = np.where(pd.isnull(timedeltas), np.nan, num)
num = cast_to_int_if_safe(num)
return (num, units)
class MaskedAndScaledArray(utils.NDArrayMixin):
"""Wrapper around array-like objects to create a new indexable object where
values, when accessed, are automatically scaled and masked according to
CF conventions for packed and missing data values.
New values are given by the formula:
original_values * scale_factor + add_offset
Values can only be accessed via `__getitem__`:
>>> x = MaskedAndScaledArray(np.array([-99, -1, 0, 1, 2]), -99, 0.01, 1)
>>> x
MaskedAndScaledArray(array([-99, -1, 0, 1, 2]), fill_value=-99,
scale_factor=0.01, add_offset=1)
>>> x[:]
array([ nan, 0.99, 1. , 1.01, 1.02])
References
----------
http://www.unidata.ucar.edu/software/netcdf/docs/BestPractices.html
"""
def __init__(self, array, fill_value=None, scale_factor=None,
add_offset=None, dtype=float):
"""
Parameters
----------
array : array-like
Original array of values to wrap
fill_value : number, optional
All values equal to fill_value in the original array are replaced
by NaN.
scale_factor : number, optional
Multiply entries in the original array by this number.
add_offset : number, optional
After applying scale_factor, add this number to entries in the
original array.
"""
self.array = array
self.fill_value = fill_value
self.scale_factor = scale_factor
self.add_offset = add_offset
self._dtype = dtype
@property
def dtype(self):
return np.dtype(self._dtype)
def __getitem__(self, key):
return mask_and_scale(self.array[key], self.fill_value,
self.scale_factor, self.add_offset, self._dtype)
def __repr__(self):
return ("%s(%r, fill_value=%r, scale_factor=%r, add_offset=%r, "
"dtype=%r)" %
(type(self).__name__, self.array, self.fill_value,
self.scale_factor, self.add_offset, self._dtype))
class DecodedCFDatetimeArray(utils.NDArrayMixin):
"""Wrapper around array-like objects to create a new indexable object where
values, when accessed, are automatically converted into datetime objects
using decode_cf_datetime.
"""
def __init__(self, array, units, calendar=None):
self.array = array
self.units = units
self.calendar = calendar
# Verify at least one date can be decoded successfully.
# Otherwise, tracebacks end up swallowed by Dataset.__repr__ when users
# try to view their lazily decoded array.
example_value = first_n_items(array, 1) or 0
try:
result = decode_cf_datetime(example_value, units, calendar)
except Exception:
calendar_msg = ('the default calendar' if calendar is None
else 'calendar %r' % calendar)
msg = ('unable to decode time units %r with %s. Try '
'opening your dataset with decode_times=False.'
% (units, calendar_msg))
if not PY3:
msg += ' Full traceback:\n' + traceback.format_exc()
raise ValueError(msg)
else:
self._dtype = getattr(result, 'dtype', np.dtype('object'))
@property
def dtype(self):
return self._dtype
def __getitem__(self, key):
return decode_cf_datetime(self.array[key], units=self.units,
calendar=self.calendar)
class DecodedCFTimedeltaArray(utils.NDArrayMixin):
"""Wrapper around array-like objects to create a new indexable object where
values, when accessed, are automatically converted into timedelta objects
using decode_cf_timedelta.
"""
def __init__(self, array, units):
self.array = array
self.units = units
@property
def dtype(self):
return np.dtype('timedelta64[ns]')
def __getitem__(self, key):
return decode_cf_timedelta(self.array[key], units=self.units)
class CharToStringArray(utils.NDArrayMixin):
"""Wrapper around array-like objects to create a new indexable object where
values, when accessed, are automatically concatenated along the last
dimension.
>>> CharToStringArray(np.array(['a', 'b', 'c']))[:]
array('abc',
dtype='|S3')
"""
def __init__(self, array):
"""
Parameters
----------
array : array-like
Original array of values to wrap.
"""
self.array = array
@property
def dtype(self):
return np.dtype('S' + str(self.array.shape[-1]))
@property
def shape(self):
return self.array.shape[:-1]
def __str__(self):
if self.ndim == 0:
# always return a unicode str if it's a single item for py3 compat
return self[...].item().decode('utf-8')
else:
return repr(self)
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self.array)
def __getitem__(self, key):
if self.array.ndim == 0:
values = self.array[key]
else:
# require slicing the last dimension completely
key = indexing.expanded_indexer(key, self.array.ndim)
if key[-1] != slice(None):
raise IndexError('too many indices')
values = char_to_string(self.array[key])
return values
class NativeEndiannessArray(utils.NDArrayMixin):
"""Decode arrays on the fly from non-native to native endianness
This is useful for decoding arrays from netCDF3 files (which are all
big endian) into native endianness, so they can be used with Cython
functions, such as those found in bottleneck and pandas.
>>> x = np.arange(5, dtype='>i2')
>>> x.dtype
dtype('>i2')
>>> NativeEndiannessArray(x).dtype
dtype('int16')
>>> NativeEndiannessArray(x)[:].dtype
dtype('int16')
"""
def __init__(self, array):
self.array = array
@property
def dtype(self):
return np.dtype(self.array.dtype.kind + str(self.array.dtype.itemsize))
def __getitem__(self, key):
return np.asarray(self.array[key], dtype=self.dtype)
class BoolTypeArray(utils.NDArrayMixin):
"""Decode arrays on the fly from integer to boolean datatype
This is useful for decoding boolean arrays from integer typed netCDF
variables.
>>> x = np.array([1, 0, 1, 1, 0], dtype='i1')
>>> x.dtype
dtype('int8')
>>> BoolTypeArray(x).dtype
dtype('bool')
>>> BoolTypeArray(x)[:].dtype
dtype('bool')
"""
def __init__(self, array):
self.array = array
@property
def dtype(self):
return np.dtype('bool')
def __getitem__(self, key):
return np.asarray(self.array[key], dtype=self.dtype)
def string_to_char(arr):
"""Like netCDF4.stringtochar, but faster and more flexible.
"""
# ensure the array is contiguous
arr = np.array(arr, copy=False, order='C')
kind = arr.dtype.kind
if kind not in ['U', 'S']:
raise ValueError('argument must be a string')
return arr.reshape(arr.shape + (1,)).view(kind + '1')
def char_to_string(arr):
"""Like netCDF4.chartostring, but faster and more flexible.
"""
# based on: http://stackoverflow.com/a/10984878/809705
arr = np.array(arr, copy=False, order='C')
kind = arr.dtype.kind
if kind not in ['U', 'S']:
raise ValueError('argument must be a string')
return arr.view(kind + str(arr.shape[-1]))[..., 0]
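def _char_string_roundtrip_example():
    # Hypothetical illustration, not part of the original module:
    # string_to_char splits fixed-width strings into a trailing character
    # dimension, and char_to_string reverses it by viewing the last axis as
    # one wide string dtype.
    arr = np.array([b'abc', b'def'], dtype='S3')
    chars = string_to_char(arr)    # shape (2, 3), dtype 'S1'
    return char_to_string(chars)   # array([b'abc', b'def'], dtype='S3')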
def safe_setitem(dest, key, value):
if key in dest:
raise ValueError('Failed hard to prevent overwriting key %r' % key)
dest[key] = value
def pop_to(source, dest, key, default=None):
"""
A convenience function which pops a key k from source to dest.
None values are not passed on. If k already exists in dest an
error is raised.
"""
value = source.pop(key, None)
if value is not None:
safe_setitem(dest, key, value)
return value
def _var_as_tuple(var):
return var.dims, var.data, var.attrs.copy(), var.encoding.copy()
def maybe_encode_datetime(var):
if np.issubdtype(var.dtype, np.datetime64):
dims, data, attrs, encoding = _var_as_tuple(var)
(data, units, calendar) = encode_cf_datetime(
data, encoding.pop('units', None), encoding.pop('calendar', None))
safe_setitem(attrs, 'units', units)
safe_setitem(attrs, 'calendar', calendar)
var = Variable(dims, data, attrs, encoding)
return var
def maybe_encode_timedelta(var):
if np.issubdtype(var.dtype, np.timedelta64):
dims, data, attrs, encoding = _var_as_tuple(var)
data, units = encode_cf_timedelta(
data, encoding.pop('units', None))
safe_setitem(attrs, 'units', units)
var = Variable(dims, data, attrs, encoding)
return var
def maybe_encode_offset_and_scale(var, needs_copy=True):
if any(k in var.encoding for k in ['add_offset', 'scale_factor']):
dims, data, attrs, encoding = _var_as_tuple(var)
data = data.astype(dtype=float, copy=needs_copy)
needs_copy = False
if 'add_offset' in encoding:
data -= pop_to(encoding, attrs, 'add_offset')
if 'scale_factor' in encoding:
data /= pop_to(encoding, attrs, 'scale_factor')
var = Variable(dims, data, attrs, encoding)
return var, needs_copy
def maybe_encode_fill_value(var, needs_copy=True):
# replace NaN with the fill value
if '_FillValue' in var.encoding:
dims, data, attrs, encoding = _var_as_tuple(var)
fill_value = pop_to(encoding, attrs, '_FillValue')
if not pd.isnull(fill_value):
data = ops.fillna(data, fill_value)
needs_copy = False
var = Variable(dims, data, attrs, encoding)
return var, needs_copy
def maybe_encode_dtype(var, name=None):
if 'dtype' in var.encoding:
dims, data, attrs, encoding = _var_as_tuple(var)
dtype = np.dtype(encoding.pop('dtype'))
if dtype != var.dtype and dtype.kind != 'O':
if np.issubdtype(dtype, np.integer):
if (np.issubdtype(var.dtype, np.floating) and
'_FillValue' not in var.attrs):
warnings.warn('saving variable %s with floating '
'point data as an integer dtype without '
'any _FillValue to use for NaNs' % name,
RuntimeWarning, stacklevel=3)
data = ops.around(data)[...]
if dtype == 'S1' and data.dtype != 'S1':
data = string_to_char(np.asarray(data, 'S'))
dims = dims + ('string%s' % data.shape[-1],)
data = data.astype(dtype=dtype)
var = Variable(dims, data, attrs, encoding)
return var
def maybe_encode_bools(var):
if ((var.dtype == np.bool) and
('dtype' not in var.encoding) and ('dtype' not in var.attrs)):
dims, data, attrs, encoding = _var_as_tuple(var)
attrs['dtype'] = 'bool'
data = data.astype(dtype='i1', copy=True)
var = Variable(dims, data, attrs, encoding)
return var
def _infer_dtype(array):
"""Given an object array with no missing values, infer its dtype from its
first element
"""
if array.size == 0:
dtype = np.dtype(float)
else:
dtype = np.array(array[(0,) * array.ndim]).dtype
if dtype.kind in ['S', 'U']:
# don't just use inferred dtype to avoid truncating arrays to
# the length of their first element
dtype = np.dtype(dtype.kind)
elif dtype.kind == 'O':
raise ValueError('unable to infer dtype; xarray cannot '
'serialize arbitrary Python objects')
return dtype
def ensure_dtype_not_object(var):
# TODO: move this from conventions to backends? (it's not CF related)
if var.dtype.kind == 'O':
dims, data, attrs, encoding = _var_as_tuple(var)
missing = pd.isnull(data)
if missing.any():
# nb. this will fail for dask.array data
non_missing_values = data[~missing]
inferred_dtype = _infer_dtype(non_missing_values)
if inferred_dtype.kind in ['S', 'U']:
# There is no safe bit-pattern for NA in typical binary string
# formats, we so can't set a fill_value. Unfortunately, this
# means we won't be able to restore string arrays with missing
# values.
fill_value = ''
else:
# insist on using float for numeric values
if not np.issubdtype(inferred_dtype, float):
inferred_dtype = np.dtype(float)
fill_value = np.nan
data = np.array(data, dtype=inferred_dtype, copy=True)
data[missing] = fill_value
else:
data = data.astype(dtype=_infer_dtype(data))
var = Variable(dims, data, attrs, encoding)
return var
def encode_cf_variable(var, needs_copy=True, name=None):
"""
Converts a Variable into a Variable which follows some
of the CF conventions:
- Nans are masked using _FillValue (or the deprecated missing_value)
- Rescaling via: scale_factor and add_offset
- datetimes are converted to the CF 'units since time' format
- dtype encodings are enforced.
Parameters
----------
var : xarray.Variable
A variable holding un-encoded data.
Returns
-------
out : xarray.Variable
A variable which has been encoded as described above.
"""
var = maybe_encode_datetime(var)
var = maybe_encode_timedelta(var)
var, needs_copy = maybe_encode_offset_and_scale(var, needs_copy)
var, needs_copy = maybe_encode_fill_value(var, needs_copy)
var = maybe_encode_dtype(var, name)
var = maybe_encode_bools(var)
var = ensure_dtype_not_object(var)
return var
def decode_cf_variable(var, concat_characters=True, mask_and_scale=True,
decode_times=True, decode_endianness=True):
"""
Decodes a variable which may hold CF encoded information.
This includes variables that have been masked and scaled, which
hold CF style time variables (this is almost always the case if
the dataset has been serialized) and which have strings encoded
as character arrays.
Parameters
----------
var : Variable
A variable holding potentially CF encoded information.
concat_characters : bool
Should character arrays be concatenated to strings, for
example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
mask_and_scale: bool
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue).
decode_times : bool
Decode cf times ('hours since 2000-01-01') to np.datetime64.
decode_endianness : bool
Decode arrays from non-native to native endianness.
Returns
-------
out : Variable
A variable holding the decoded equivalent of var
"""
# use _data instead of data so as not to trigger loading data
var = as_variable(var)
data = var._data
dimensions = var.dims
attributes = var.attrs.copy()
encoding = var.encoding.copy()
original_dtype = data.dtype
if concat_characters:
if data.dtype.kind == 'S' and data.dtype.itemsize == 1:
dimensions = dimensions[:-1]
data = CharToStringArray(data)
if mask_and_scale:
if 'missing_value' in attributes:
# missing_value is deprecated, but we still want to support it as
# an alias for _FillValue.
if ('_FillValue' in attributes and
not utils.equivalent(attributes['_FillValue'],
attributes['missing_value'])):
raise ValueError("Discovered conflicting _FillValue "
"and missing_value. Considering "
"opening the offending dataset using "
"decode_cf=False, corrected the attributes",
"and decoding explicitly using "
"xarray.conventions.decode_cf(ds)")
attributes['_FillValue'] = attributes.pop('missing_value')
fill_value = np.array(pop_to(attributes, encoding, '_FillValue'))
if fill_value.size > 1:
warnings.warn("variable has multiple fill values {0}, decoding "
"all values to NaN.".format(str(fill_value)),
RuntimeWarning, stacklevel=3)
scale_factor = pop_to(attributes, encoding, 'scale_factor')
add_offset = pop_to(attributes, encoding, 'add_offset')
if ((fill_value is not None and not np.any(pd.isnull(fill_value))) or
scale_factor is not None or add_offset is not None):
if fill_value.dtype.kind in ['U', 'S']:
dtype = object
else:
dtype = float
data = MaskedAndScaledArray(data, fill_value, scale_factor,
add_offset, dtype)
if decode_times and 'units' in attributes:
if 'since' in attributes['units']:
# datetime
units = pop_to(attributes, encoding, 'units')
calendar = pop_to(attributes, encoding, 'calendar')
data = DecodedCFDatetimeArray(data, units, calendar)
elif attributes['units'] in TIME_UNITS:
# timedelta
units = pop_to(attributes, encoding, 'units')
data = DecodedCFTimedeltaArray(data, units)
if decode_endianness and not data.dtype.isnative:
# do this last, so it's only done if we didn't already unmask/scale
data = NativeEndiannessArray(data)
original_dtype = data.dtype
if 'dtype' in encoding:
if original_dtype != encoding['dtype']:
warnings.warn("CF decoding is overwriting dtype")
else:
encoding['dtype'] = original_dtype
if 'dtype' in attributes and attributes['dtype'] == 'bool':
del attributes['dtype']
data = BoolTypeArray(data)
return Variable(dimensions, indexing.LazilyIndexedArray(data),
attributes, encoding=encoding)
def decode_cf_variables(variables, attributes, concat_characters=True,
mask_and_scale=True, decode_times=True,
decode_coords=True, drop_variables=None):
"""
Decode several CF encoded variables.
See: decode_cf_variable
"""
dimensions_used_by = defaultdict(list)
for v in variables.values():
for d in v.dims:
dimensions_used_by[d].append(v)
def stackable(dim):
# figure out if a dimension can be concatenated over
if dim in variables:
return False
for v in dimensions_used_by[dim]:
if v.dtype.kind != 'S' or dim != v.dims[-1]:
return False
return True
coord_names = set()
if isinstance(drop_variables, basestring):
drop_variables = [drop_variables]
elif drop_variables is None:
drop_variables = []
drop_variables = set(drop_variables)
new_vars = OrderedDict()
for k, v in iteritems(variables):
if k in drop_variables:
continue
concat = (concat_characters and v.dtype.kind == 'S' and v.ndim > 0 and
stackable(v.dims[-1]))
new_vars[k] = decode_cf_variable(
v, concat_characters=concat, mask_and_scale=mask_and_scale,
decode_times=decode_times)
if decode_coords:
var_attrs = new_vars[k].attrs
if 'coordinates' in var_attrs:
coord_str = var_attrs['coordinates']
var_coord_names = coord_str.split()
if all(k in variables for k in var_coord_names):
new_vars[k].encoding['coordinates'] = coord_str
del var_attrs['coordinates']
coord_names.update(var_coord_names)
if decode_coords and 'coordinates' in attributes:
attributes = OrderedDict(attributes)
coord_names.update(attributes.pop('coordinates').split())
return new_vars, attributes, coord_names
def decode_cf(obj, concat_characters=True, mask_and_scale=True,
decode_times=True, decode_coords=True, drop_variables=None):
"""Decode the given Dataset or Datastore according to CF conventions into
a new Dataset.
Parameters
----------
obj : Dataset or DataStore
Object to decode.
concat_characters : bool, optional
Should character arrays be concatenated to strings, for
example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
mask_and_scale: bool, optional
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue).
decode_times : bool, optional
Decode cf times (e.g., integers since 'hours since 2000-01-01') to
np.datetime64.
decode_coords : bool, optional
Use the 'coordinates' attribute on variable (or the dataset itself) to
identify coordinates.
drop_variables: string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
Returns
-------
decoded : Dataset
"""
from .core.dataset import Dataset
from .backends.common import AbstractDataStore
if isinstance(obj, Dataset):
vars = obj._variables
attrs = obj.attrs
extra_coords = set(obj.coords)
file_obj = obj._file_obj
elif isinstance(obj, AbstractDataStore):
vars, attrs = obj.load()
extra_coords = set()
file_obj = obj
else:
raise TypeError('can only decode Dataset or DataStore objects')
vars, attrs, coord_names = decode_cf_variables(
vars, attrs, concat_characters, mask_and_scale, decode_times,
decode_coords, drop_variables=drop_variables)
ds = Dataset(vars, attrs=attrs)
ds = ds.set_coords(coord_names.union(extra_coords))
ds._file_obj = file_obj
return ds
def cf_decoder(variables, attributes,
concat_characters=True, mask_and_scale=True,
decode_times=True):
"""
Decode a set of CF encoded variables and attributes.
See Also, decode_cf_variable
Parameters
----------
variables : dict
A dictionary mapping from variable name to xarray.Variable
attributes : dict
A dictionary mapping from attribute name to value
concat_characters : bool
Should character arrays be concatenated to strings, for
example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
mask_and_scale: bool
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue).
decode_times : bool
Decode cf times ('hours since 2000-01-01') to np.datetime64.
Returns
-------
decoded_variables : dict
A dictionary mapping from variable name to xarray.Variable objects.
decoded_attributes : dict
A dictionary mapping from attribute name to values.
"""
variables, attributes, _ = decode_cf_variables(
variables, attributes, concat_characters, mask_and_scale, decode_times)
return variables, attributes
def _encode_coordinates(variables, attributes, non_dim_coord_names):
# calculate global and variable specific coordinates
non_dim_coord_names = set(non_dim_coord_names)
global_coordinates = non_dim_coord_names.copy()
variable_coordinates = defaultdict(set)
for coord_name in non_dim_coord_names:
target_dims = variables[coord_name].dims
for k, v in variables.items():
if (k not in non_dim_coord_names and k not in v.dims and
any(d in target_dims for d in v.dims)):
variable_coordinates[k].add(coord_name)
global_coordinates.discard(coord_name)
variables = OrderedDict((k, v.copy(deep=False))
for k, v in variables.items())
# These coordinates are saved according to CF conventions
for var_name, coord_names in variable_coordinates.items():
attrs = variables[var_name].attrs
if 'coordinates' in attrs:
raise ValueError('cannot serialize coordinates because variable '
"%s already has an attribute 'coordinates'"
% var_name)
attrs['coordinates'] = ' '.join(map(str, coord_names))
# These coordinates are not associated with any particular variables, so we
# save them under a global 'coordinates' attribute so xarray can roundtrip
# the dataset faithfully. Because this serialization goes beyond CF
# conventions, only do it if necessary.
# Reference discussion:
# http://mailman.cgd.ucar.edu/pipermail/cf-metadata/2014/057771.html
if global_coordinates:
attributes = OrderedDict(attributes)
if 'coordinates' in attributes:
raise ValueError('cannot serialize coordinates because the global '
"attribute 'coordinates' already exists")
attributes['coordinates'] = ' '.join(map(str, global_coordinates))
return variables, attributes
def encode_dataset_coordinates(dataset):
"""Encode coordinates on the given dataset object into variable specific
and global attributes.
When possible, this is done according to CF conventions.
Parameters
----------
dataset : Dataset
Object to encode.
Returns
-------
variables : dict
attrs : dict
"""
non_dim_coord_names = set(dataset.coords) - set(dataset.dims)
return _encode_coordinates(dataset._variables, dataset.attrs,
non_dim_coord_names=non_dim_coord_names)
def cf_encoder(variables, attributes):
"""
A function which takes dicts of variables and attributes
and encodes them to conform to CF conventions as much
as possible. This includes masking, scaling, character
array handling, and CF-time encoding.
Decode a set of CF encoded variables and attributes.
See Also, decode_cf_variable
Parameters
----------
variables : dict
A dictionary mapping from variable name to xarray.Variable
attributes : dict
A dictionary mapping from attribute name to value
Returns
-------
encoded_variables : dict
A dictionary mapping from variable name to xarray.Variable,
encoded_attributes : dict
A dictionary mapping from attribute name to value
See also: encode_cf_variable
"""
new_vars = OrderedDict((k, encode_cf_variable(v, name=k))
for k, v in iteritems(variables))
return new_vars, attributes
|
NicWayand/xray
|
xarray/conventions.py
|
Python
|
apache-2.0
| 37,760
|
[
"NetCDF"
] |
e57239920d3a1ccbe310d9fcc90a145fa70cd6a6e84f6e592a176111b5aa5e54
|
#!/usr/bin/env python
"""
DragonPy - sbc09 memory info
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:created: 2013 by Jens Diemer - www.jensdiemer.de
:copyleft: 2013 by the DragonPy team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
from __future__ import absolute_import, division, print_function
import logging
from MC6809.core.memory_info import BaseMemoryInfo
log = logging.getLogger("DragonPy.sbc09.mem_info")
class SBC09MemInfo(BaseMemoryInfo):
MEM_INFO = (
# generated from "monitor.lst":
(0xe400, 0xe400, "Disable interrupts."),
(0xe403, 0xe403, "Set direct page register to 0."),
(0xe411, 0xe411, "Initialize interrupt vectors from ROM."),
(0xe41b, 0xe41b, "Initialize I/O vectors from ROM."),
(0xe41d, 0xe41d, "Initialize serial port."),
(0xe41f, 0xe41f, "Enable interrupts"),
(0xe421, 0xe421, "Put the 'saved' registers of the program being monitored on top of the stack. There are 12 bytes on the stack for cc,b,a,dp,x,y,u and pc pc is initialized to 0x400, the rest to zero."),
(0xe437, 0xe437, "Clear the variable area."),
(0xe43c, 0xe43c, "Set XMODEM filler and end-of-line."),
(0xe445, 0xe445, "Print a welcome message."),
(0xe44a, 0xe44a, "Block move routine, from X to U length B. Modifies them all and A."),
(0xe452, 0xe452, "Initialize serial communications port, buffers, interrupts."),
(0xe45a, 0xe45a, "O.S. routine to read a character into B register."),
        (0xe465, 0xe465, "O.S. routine to check if there is a character ready to be read."),
(0xe471, 0xe471, "O.S. routine to write the character in the B register."),
(0xe480, 0xe480, "O.S. routine to read a line into memory at address X, at most B chars long, return actual length in B. Permit backspace editing."),
(0xe491, 0xe491, "Recognize BS and DEL as backspace key."),
(0xe492, 0xe492, "ignore if line already zero length."),
(0xe49c, 0xe49c, "Send BS,space,BS. This erases last"),
(0xe49e, 0xe49e, "character on most terminals."),
(0xe4a0, 0xe4a0, "Decrement address."),
(0xe4ab, 0xe4ab, "CR or LF character ends line."),
(0xe4af, 0xe4af, "Move length to B"),
(0xe4b1, 0xe4b1, "restore registers."),
(0xe4b3, 0xe4b3, "<--- Here is the exit point."),
(0xe4b6, 0xe4b6, "Ignore control characters."),
(0xe4ba, 0xe4ba, "Ignore char if line full."),
(0xe4bc, 0xe4bc, "Echo the character."),
(0xe4be, 0xe4be, "Store it in memory."),
(0xe4c3, 0xe4c3, "O.S. routine to write a line starting at address X, B chars long."),
(0xe4d4, 0xe4d4, "O.S. routine to terminate a line."),
(0xe4dc, 0xe4dc, "Send the CR and LF characters."),
(0xe4e1, 0xe4e1, "Output a counted string at addr X"),
(0xe4f7, 0xe4f7, "Wait D times 20ms."),
(0xe4ff, 0xe4ff, "This table will be copied to the interrupt vector area in RAM."),
(0xe51a, 0xe51a, "And this one to the I/O vector table."),
(0xe53e, 0xe53e, "Stack something where the pc comes"),
(0xe53e, 0xe53e, "The J command returns here."),
(0xe540, 0xe540, "Stack the normal registers."),
(0xe545, 0xe545, "Stack the old pc value."),
(0xe549, 0xe549, "The G and P commands return here through a breakpoint. Registers are already stacked."),
(0xe54e, 0xe54e, "Decrement pc before breakpoint"),
(0xe550, 0xe550, "reenable the interrupts."),
(0xe552, 0xe552, "Disarm the breakpoints."),
(0xe566, 0xe566, "Ignore line if it is empty"),
(0xe569, 0xe569, "Make location after line zero."),
(0xe570, 0xe570, "Make 1st char uppercase."),
(0xe578, 0xe578, "Unknown cmd if it is not a letter."),
(0xe57d, 0xe57d, "Index into command table."),
(0xe5b4, 0xe5b4, "Unknown command handling routine."),
(0xe5c1, 0xe5c1, "Here are some useful messages."),
(0xe6c5, 0xe6c5, "Output hex digit contained in A"),
(0xe6ca, 0xe6ca, "It's the standard conversion trick ascii"),
(0xe6cb, 0xe6cb, "to hex without branching."),
(0xe6d0, 0xe6d0, "Output contents of A as two hex digits"),
(0xe6de, 0xe6de, "Output contents of d as four hex digits"),
(0xe6e7, 0xe6e7, "Skip X past spaces, B is first non-space character."),
(0xe6ee, 0xe6ee, "Convert ascii hex digit in B register to binary Z flag set if no hex digit."),
(0xe6f6, 0xe6f6, "Make uppercase."),
(0xe6f8, 0xe6f8, "If higher than digit 9 it must be a letter."),
(0xe702, 0xe702, "clear zero"),
        (0xe70f, 0xe70f, "Scan for hexadecimal number at address X return in D, Z flag is set if no number found."),
(0xe74a, 0xe74a, "Scan two hexdigits at in and convert to byte into A, Z flag if error."),
(0xe760, 0xe760, "Clear zero flag"),
(0xe763, 0xe763, "This is the code for the D command, hex/ascii dump of memory Syntax: D or D<addr> or D<addr>,<length>"),
(0xe769, 0xe769, "Scan address and length, default length=64"),
(0xe77d, 0xe77d, "display row of 16 mem locations as hex"),
(0xe78a, 0xe78a, "Do a - after the eighth byte."),
(0xe792, 0xe792, "And now for the ascii dump."),
(0xe7a2, 0xe7a2, "Convert all nonprintables to ."),
(0xe7bd, 0xe7bd, 'This is the code for the E command, enter hex bytes or ascii string. Syntax E or E<addr> or E<addr> <bytes> or E<addr>"string"'),
(0xe7ca, 0xe7ca, "No bytes, then enter interactively."),
(0xe7da, 0xe7da, "Display Eaddr + space"),
(0xe7e1, 0xe7e1, "Get the line."),
(0xe7f0, 0xe7f0, "Enter a line of hex bytes or ascci string at address X, Z if empty."),
(0xe800, 0xe800, "Enter hex digits."),
(0xe824, 0xe824, "This is the code for the I command, display the contents of an address Syntax: Iaddr"),
(0xe82c, 0xe82c, "Read the byte from memory."),
        (0xe82e, 0xe82e, "Display it in hex."),
        (0xe836, 0xe836, "This is the code for the H command, display result of simple hex expression. Syntax Hhexnum{+|-hexnum}"),
(0xe86a, 0xe86a, "This is the code for the G command, jump to the program Syntax G or G<addr>"),
(0xe872, 0xe872, "Store parameter in pc location."),
(0xe874, 0xe874, "Arm the breakpoints."),
(0xe879, 0xe879, "This is the code for the J command, run a subroutine. Syntax J<addr>"),
(0xe87e, 0xe87e, "Save old pc"),
(0xe884, 0xe884, "Store parameter in PC location"),
(0xe88c, 0xe88c, "Move the saved register set 2 addresses"),
(0xe88e, 0xe88e, "down on the stack."),
(0xe894, 0xe894, "Prepare subroutine return address."),
(0xe896, 0xe896, "Jump to the routine."),
(0xe898, 0xe898, "Get program counter value."),
(0xe898, 0xe898, "This is the code for the P command, run instruction followed by breakpoint Syntax P"),
(0xe89b, 0xe89b, "Find out location past current insn."),
(0xe8a4, 0xe8a4, "This is the code for the T command, single step trace an instruction. Syntax T"),
(0xe8a7, 0xe8a7, "Display the contents of 8 bit register, name in B, contents in A"),
(0xe8b5, 0xe8b5, "Display the contents of 16 bit register, name in B, contents in Y"),
(0xe8c5, 0xe8c5, "Display the contents of the registers and disassemble instruction at PC location."),
(0xe8c7, 0xe8c7, "Note that there's one return address on"),
(0xe8ca, 0xe8ca, "stack so saved register offsets are"),
        (0xe8cc, 0xe8cc, "incremented by 2."),
(0xe8de, 0xe8de, "S of the running program is 12 higher,"),
(0xe8e0, 0xe8e0, "because regs are not stacked when running."),
(0xe906, 0xe906, "Disassemble instruction at PC"),
(0xe90c, 0xe90c, "This is the code for the R command, display or alter the registers. Syntax R or R<letter><hex>"),
        (0xe915, 0xe915, "Display regs if nothing follows."),
(0xe91f, 0xe91f, "Make letter uppercase."),
(0xe923, 0xe923, "At end of register tab, unknown reg"),
(0xe929, 0xe929, "Found the register?"),
(0xe930, 0xe930, "Convert the hex argument."),
(0xe935, 0xe935, "Get register number."),
(0xe93b, 0xe93b, "It's 8 bit."),
(0xe93d, 0xe93d, "Remove temp stuff from stack."),
(0xe93f, 0xe93f, "Store it in the reg on stack."),
(0xe948, 0xe948, "It's 16 bit."),
(0xe94d, 0xe94d, "Convert reg no to stack offset."),
(0xe954, 0xe954, "It's the stack pointer."),
(0xe95c, 0xe95c, "Set new stack pointer."),
(0xe960, 0xe960, "Move register set to new stack location."),
        (0xe970, 0xe970, "Disarm the breakpoints, i.e. replace the SWI instructions with the original byte."),
(0xe977, 0xe977, "Get address in u, byte in b"),
(0xe987, 0xe987, "Clear the step breakpoint."),
(0xe98a, 0xe98a, "3"),
(0xe98a, 0xe98a, "Arm the breakponts, this is replace the byte at the breakpoint address with an SWI instruction."),
(0xe98d, 0xe98d, "Arm them in reverse order of disarming."),
(0xe98f, 0xe98f, "Get address in u."),
(0xe997, 0xe997, "Compare to program counter location"),
(0xe99e, 0xe99e, "Store SWI instruction if not equal."),
(0xe9a6, 0xe9a6, "This is the code for the break command, set, clear display breakpoints. Syntax B or B<addr>. B displays, B<addr> sets or clears breakpoint."),
(0xe9a8, 0xe9a8, "Store number of breakpoints to visit."),
(0xe9b0, 0xe9b0, "No number then display breakpoints"),
(0xe9bd, 0xe9bd, "Found the breakpoint, so clear it,"),
(0xe9bf, 0xe9bf, "Is location zero"),
(0xe9c4, 0xe9c4, "Set free address to y"),
(0xe9cc, 0xe9cc, "Address not found in list of breakpoints"),
(0xe9d0, 0xe9d0, "Was free address found."),
(0xe9d2, 0xe9d2, "If so, store breakpoint there."),
(0xea06, 0xea06, "Scan hex byte into a and add it to check sum in temp2+1"),
        (0xea14, 0xea14, "This is the code for the S command, the Motorola S records entry. Syntax SO<addr> or SS<addr>,<len> or S1<bytes> or S9<bytes>"),
(0xea31, 0xea31, "clear checksum."),
(0xea35, 0xea35, "discount the address bytes from the count."),
(0xea37, 0xea37, "Read length byte."),
(0xea41, 0xea41, "Read address into d."),
(0xea4d, 0xea4d, "Sorg is nonzero and soffs is zero, now"),
(0xea4f, 0xea4f, "set soffs"),
(0xea57, 0xea57, "Subtract the address offset."),
(0xea66, 0xea66, "Check checksum."),
(0xea6c, 0xea6c, "Was it no S9 record?"),
(0xea76, 0xea76, "Store address into program counter."),
(0xea7b, 0xea7b, "Reset sorg, next S loads will be normal."),
(0xea86, 0xea86, "Error in srecord, display message."),
(0xea91, 0xea91, "Set S record origin."),
(0xea9f, 0xea9f, "Scan address and length parameter."),
(0xea9f, 0xea9f, "Send a memory region as S-records."),
(0xeab0, 0xeab0, "Compute offset for origin."),
(0xeab6, 0xeab6, "All bytes sent?"),
(0xeabe, 0xeabe, "If more than 16 left, then send 16."),
(0xeac8, 0xeac8, "Discount line length from length."),
(0xead3, 0xead3, "Clear check sum"),
(0xead9, 0xead9, "Output byte b as hex and add to check sum."),
(0xeae9, 0xeae9, "Output address (add into check sum)"),
(0xeafa, 0xeafa, "Output checksum byte."),
(0xeb0b, 0xeb0b, "Output byte in register B and add it into check sum at temp+1"),
(0xeb19, 0xeb19, "This is the code for the M command, move memory region. Syntax: Maddr1,addr2,length"),
(0xeb47, 0xeb47, "Read the argument separated by commas"),
(0xeb49, 0xeb49, "src addr to x, dest addr to u, length to y"),
(0xeb4b, 0xeb4b, "Don't tolerate syntax deviations."),
(0xeb51, 0xeb51, "Perform the block move."),
(0xeb56, 0xeb56, 'This is the code for the F command, find byte/ascii string in memory. Syntax: Faddr bytes or Faddr "ascii"'),
(0xeb5c, 0xeb5c, "Scan the start address."),
(0xeb65, 0xeb65, "Quote found, so scan for quoted string."),
(0xeb6b, 0xeb6b, "End of line without final quote."),
(0xeb6f, 0xeb6f, "End quote found"),
(0xeb76, 0xeb76, "Convert string of hex bytes."),
(0xeb79, 0xeb79, "String will be stored at start of line"),
(0xeb7b, 0xeb7b, "buffer and may overwrite part of the"),
(0xeb7c, 0xeb7c, "already converted string."),
(0xeb8c, 0xeb8c, "Start searching, start addr in Y,"),
(0xeb8d, 0xeb8d, "Quit with zero length string."),
(0xeb8d, 0xeb8d, "string starts at linebuf, length A"),
(0xeba0, 0xeba0, "Stop at I/O addresses."),
(0xebb0, 0xebb0, "Not equal, try next address."),
(0xebb7, 0xebb7, "String found"),
(0xebc2, 0xebc2, "If 10 matches found, just stop."),
(0xebca, 0xebca, "Send the contents of the xmodem buffer and get it acknowledged, zero flag is set if transfer aborted."),
(0xebcc, 0xebcc, "Send SOH"),
(0xebd1, 0xebd1, "Send block number."),
(0xebd5, 0xebd5, "and its complement."),
(0xebeb, 0xebeb, "Send the buffer contents."),
(0xebef, 0xebef, "Send the check sum"),
(0xebf7, 0xebf7, "^X for abort."),
(0xebfb, 0xebfb, "Send again if NAK"),
(0xec03, 0xec03, "Clear zero flag after ACK"),
(0xec06, 0xec06, "Start an XMODEM send session."),
(0xec08, 0xec08, "Initialize block number."),
(0xec0f, 0xec0f, "If ^X exit with zero flag."),
(0xec15, 0xec15, "Wait until NAK received."),
(0xec17, 0xec17, "Send ETX and wait for ack."),
(0xec2d, 0xec2d, "Read character into B with a timeout of A seconds, Carry set if timeout."),
(0xec41, 0xec41, "Wait until line becomes quiet."),
(0xec49, 0xec49, "Receive an XMODEM block and wait till it is OK, Z set if etx."),
(0xec51, 0xec51, "Send an ack."),
(0xec5a, 0xec5a, "Send a NAK"),
(0xec63, 0xec63, "Keep sending NAKs when timed out."),
(0xec67, 0xec67, "End of file reached, acknowledge EOT."),
        (0xec6b, 0xec6b, "Not SOH, bad block."),
(0xec73, 0xec73, "Is it the right block?"),
(0xec78, 0xec78, "Was it the previous block."),
(0xec87, 0xec87, "Is the complement of the block number OK"),
(0xec9f, 0xec9f, "Get the data bytes."),
(0xeca9, 0xeca9, "Check the check sum."),
(0xecad, 0xecad, "Block was the previous block, get next one"),
(0xecbb, 0xecbb, "EOT was received."),
(0xecc3, 0xecc3, "Send 3 acks in a row."),
(0xece6, 0xece6, "O.S. routine to open input through XMODEM transfer."),
(0xecee, 0xecee, "Display message to start XMODEM send."),
(0xecf5, 0xecf5, "Disable character output."),
(0xecfa, 0xecfa, ""),
(0xed05, 0xed05, "set xmode to 2."),
(0xed09, 0xed09, "O.S. routine to open output through XMODEM transfer."),
(0xed10, 0xed10, "Display message to start XMODEM receive"),
(0xed2e, 0xed2e, "O.S. routine to abort input through XMODEM transfer."),
(0xed3f, 0xed3f, "Send 8 CAN characters to kill transfer."),
(0xed4b, 0xed4b, "Send diagnostic message."),
(0xed4e, 0xed4e, "O.S. routine to close output through XMODEM transfer."),
(0xed62, 0xed62, "Transfer filler chars to force block out."),
(0xed64, 0xed64, "Send EOT"),
(0xed71, 0xed71, "O.S. routine to close input through XMODEM, by gobbling up the remaining bytes."),
(0xed7b, 0xed7b, "putchar routine for XMODEM"),
(0xed84, 0xed84, "Store character in XMODEM buffer."),
(0xed88, 0xed88, "is buffer full?"),
(0xed99, 0xed99, "putcr routine for XMODEM"),
(0xedb4, 0xedb4, "getchar routine for XMODEM"),
(0xedb6, 0xedb6, "No characters left?"),
(0xedbc, 0xedbc, "Receive new block."),
(0xedc1, 0xedc1, "End of input?"),
(0xedcd, 0xedcd, "Get character from buffer"),
(0xeddd, 0xeddd, "Restore I/O vectors"),
(0xedf4, 0xedf4, "This is the code for the X command, various XMODEM related commands. Syntax: XSaddr,len XLaddr,len XX XOcrlf,filler, XSSaddr,len"),
(0xedf9, 0xedf9, "Convert to uppercase."),
(0xee15, 0xee15, "XSaddr,len command."),
(0xee18, 0xee18, "Send binary through XMODEM"),
(0xee2a, 0xee2a, "Send all the bytes through XMODEM."),
(0xee2f, 0xee2f, "XSSaddr,len command."),
(0xee31, 0xee31, "Send Srecords through XMODEM"),
(0xee36, 0xee36, "XLaddr command"),
(0xee39, 0xee39, "Load binary through XMODEM"),
(0xee3f, 0xee3f, "File ended? then done"),
(0xee49, 0xee49, "XX command"),
(0xee4b, 0xee4b, "Execute commands received from XMODEM"),
(0xee63, 0xee63, "mnemonics table, ordered alphabetically. 5 bytes name, 1 byte category, 2 bytes opcode, 8 bytes total."),
(0xf2fb, 0xf2fb, "-mnemtab)/8"),
(0xf2fb, 0xf2fb, "Register table for PUSH/PULL and TFR/EXG instructions. 3 bytes for name, 1 for tfr/exg, 1 for push/pull, 5 total"),
(0xf33e, 0xf33e, "opcode offsets to basic opcode, depends on first nibble."),
(0xf34e, 0xf34e, "mode depending on first nibble of opcode."),
(0xf35e, 0xf35e, "mode depending on category code stored in mnemtab"),
(0xf36b, 0xf36b, "Decode instruction pointed to by Y for disassembly (and to find out how long it is). On return, U points to appropriate mnemonic table entry, Y points past instruction. It's rather clumsy code, but we do want to reuse the same table as used with assembling."),
(0xf36b, 0xf36b, "modes in this context: 0 no operands, 1 8-bit immediate, 2 16 bit imm, 3, 8-bit address, 4 16 bit address, 5 indexed with postbyte, 6 short relative, 7 long relative, 8 pushpul, 9 tftetx"),
(0xf37b, 0xf37b, "Store 0x10 or 0x11 prebyte."),
(0xf37e, 0xf37e, "Get new opcode."),
(0xf386, 0xf386, "Get high nibble."),
(0xf394, 0xf394, "Add opcode offset to opcode."),
(0xf397, 0xf397, "Store the 'basis' opcode."),
(0xf3a2, 0xf3a2, "Compare category code with 13"),
(0xf3a4, 0xf3a4, "13=pseudo op, no valid opcode"),
(0xf3ac, 0xf3ac, "Opcode&prebyte agree, operation found."),
(0xf3ae, 0xf3ae, "point to next mnemonic"),
(0xf3b4, 0xf3b4, "mnemonic not found, use FCB byte."),
(0xf3b9, 0xf3b9, "Store mode 3, 8 bit address."),
(0xf3c4, 0xf3c4, "if it was the combination prebyte"),
(0xf3c7, 0xf3c7, "and opcode that was not found,"),
(0xf3ca, 0xf3ca, "FCB just the prebyte"),
(0xf3cc, 0xf3cc, "The byte must be stored as operand."),
(0xf3d6, 0xf3d6, "Is it really the BSR opcode?"),
(0xf3dd, 0xf3dd, "We mistakenly found BSR instead of JSR"),
(0xf3e7, 0xf3e7, "nibble-dependent mode was 0 or 1,"),
(0xf3e9, 0xf3e9, "use category dependent mode instead."),
(0xf3f8, 0xf3f8, "jump dependent on definitive mode."),
(0xf441, 0xf441, "postbytes <0x80 have no extra operands."),
(0xf44e, 0xf44e, "Display disassembled instruction after the invocation of disdecode. U points to mnemonic table entry."),
(0xf452, 0xf452, "Display the mnemonic."),
(0xf45f, 0xf45f, "Perform action dependent on mode."),
(0xf4b2, 0xf4b2, "Walk through the register table."),
(0xf4bc, 0xf4bc, "Is bit corresponding to reg set in postbyte"),
(0xf4ca, 0xf4ca, "no u register in pshu pulu."),
(0xf4da, 0xf4da, "no s register in pshs puls."),
(0xf4df, 0xf4df, "remove the bits from postbyte."),
(0xf4eb, 0xf4eb, "print comma after first register."),
(0xf557, 0xf557, "Display ,Xreg and terminating ]"),
(0xf55c, 0xf55c, "Display ] if indirect."),
(0xf578, 0xf578, "Jump to routine for indexed mode"),
(0xf634, 0xf634, "Display byte A in decimal (0<=A<20)"),
(0xf644, 0xf644, "This is the code for the U command, unassemble instructions in memory. Syntax: U or Uaddr or Uaddr,length"),
(0xf64a, 0xf64a, "Scan address,length parameters."),
(0xf65c, 0xf65c, "Display instruction address"),
(0xf679, 0xf679, "Display instruction bytes as hex."),
(0xf683, 0xf683, "Fill out with spaces to width 11."),
(0xf689, 0xf689, "Display disassembled instruction."),
(0xf69b, 0xf69b, "Simple 'expression evaluator' for assembler."),
(0xf6ac, 0xf6ac, "Was the minus sign there."),
(0xf6b3, 0xf6b3, "Clear Z flag for valid result."),
(0xf6bd, 0xf6bd, "Hex number if starting with dollar."),
(0xf6c3, 0xf6c3, "char if starting with ' else decimal"),
(0xf6cd, 0xf6cd, "Increment past final quote if it's there."),
(0xf6d0, 0xf6d0, "Clear zero flag."),
(0xf6f3, 0xf6f3, "Multiply number by 10."),
(0xf6f7, 0xf6f7, "Add digit to 10."),
(0xf6fb, 0xf6fb, "Get next character."),
        (0xf709, 0xf709, "Assemble the instruction pointed to by X. First stage: copy mnemonic to mnemonic buffer."),
(0xf714, 0xf714, "Mnemonic ends at first space or null"),
(0xf71e, 0xf71e, "Capitalize letters, but only letters."),
(0xf722, 0xf722, "Copy to mnemonic buffer."),
(0xf72f, 0xf72f, "Fill the rest of mnem buffer with spaces."),
(0xf731, 0xf731, "Second stage: look mnemonic up using binary search."),
(0xf733, 0xf733, "Low index=0"),
(0xf737, 0xf737, "High index=mnemsize."),
(0xf73d, 0xf73d, "lower limit -1?"),
(0xf741, 0xf741, "hi index lower than low index?"),
(0xf744, 0xf744, "Add indexes."),
(0xf749, 0xf749, "Divide by 2 to get average"),
(0xf751, 0xf751, "Multiply by 8 to get offset."),
(0xf755, 0xf755, "Add offset to table base"),
(0xf762, 0xf762, "Characters don't match?"),
(0xf767, 0xf767, "We found the mnemonic."),
(0xf76f, 0xf76f, "mnembuf<table, adjust high limit."),
(0xf774, 0xf774, "mnembuf>table, adjust low limit."),
(0xf77e, 0xf77e, "Stage 3: Perform routine depending on category code."),
(0xf7b4, 0xf7b4, "Cat 0, one byte opcode w/o operands RTS"),
(0xf7b8, 0xf7b8, "Cat 1, two byte opcode w/o operands SWI2"),
        (0xf7bc, 0xf7bc, "Cat 2, opcode w/ immediate operand ANDCC"),
(0xf7d1, 0xf7d1, "Cat 3, LEA"),
(0xf7dd, 0xf7dd, "No immediate w/ lea"),
(0xf7f1, 0xf7f1, "Use 8F nn nn for direct mode."),
(0xf7f7, 0xf7f7, "Cat 4, short branch instructions"),
        (0xf80a, 0xf80a, "Cat 5, long branch w/ two byte opcode"),
(0xf81d, 0xf81d, "Cat 6, long branch w/ one byte opcode."),
(0xf824, 0xf824, "Cat 7, 8-bit two operand instructions ADDA"),
(0xf835, 0xf835, "Cat 8, 16-bit 2operand insns 1byte opc LDX"),
(0xf846, 0xf846, "Cat 9, 16-bit 2operand insns 2byte opc LDY"),
(0xf859, 0xf859, "Cat 10, one-operand insns NEG..CLR"),
(0xf863, 0xf863, "No immediate mode"),
(0xf869, 0xf869, "indexed etc"),
(0xf871, 0xf871, "Add 0x70 for extended direct."),
(0xf873, 0xf873, "And 0x60 for indexed etc."),
(0xf875, 0xf875, "And nothing for direct8."),
(0xf87b, 0xf87b, "Cat 11, TFR and EXG"),
(0xf8a4, 0xf8a4, "Cat 12, PSH and PUL"),
        (0xf8ca, 0xf8ca, "Cat 13, pseudo operations"),
        (0xf8cb, 0xf8cb, "Adjust opcode depending on mode (in 0x80-0xFF range)"),
(0xf8d2, 0xf8d2, "Is it direct?"),
(0xf8d6, 0xf8d6, "Indexed etc?"),
(0xf8d8, 0xf8d8, "Not, then immediate, no adjust."),
(0xf8d9, 0xf8d9, "Add 0x20 to opcode for indexed etc modes."),
(0xf8dc, 0xf8dc, "Add 0x10 to opcode for direct8"),
(0xf8e2, 0xf8e2, "If opsize=2, add another 0x20 for extended16"),
(0xf8e5, 0xf8e5, "Start scanning of operands."),
(0xf8ed, 0xf8ed, "This subroutine scans the assembler operands."),
(0xf8ed, 0xf8ed, "amode settings in assembler: 1=immediate, 2=direct/extended, 3=indexed etc. 4=pc relative, 5=indirect, 6=pcrelative and indirect."),
(0xf8f3, 0xf8f3, "operand starts with [, then indirect."),
(0xf906, 0xf906, "Convert to uppercase."),
(0xf91a, 0xf91a, "Could it be A,X B,X or D,X"),
(0xf930, 0xf930, "Point to the start of the operand"),
(0xf93f, 0xf93f, "Go for extended if operand unknown."),
(0xf944, 0xf944, "Can we use 8-bit operand?"),
(0xf94c, 0xf94c, "Set opsize depending on magnitude of op."),
(0xf954, 0xf954, "Or was it indirect."),
(0xf956, 0xf956, "Then we have postbyte and opsize=2"),
(0xf964, 0xf964, "Assume direct or absolute addressing"),
        (0xf96b, 0xf96b, "If followed by ',' then indexed."),
(0xf974, 0xf974, "Was it an indirect mode?"),
(0xf979, 0xf979, "Set indirect bit."),
(0xf980, 0xf980, "Check for the other ]"),
(0xf987, 0xf987, "Immediate addressing."),
        (0xf996, 0xf996, "Indirect mode w/ imm is illegal."),
(0xf9ad, 0xf9ad, "Count the - signs for autodecrement."),
(0xf9c9, 0xf9c9, "Count the + signs for autoincrement."),
(0xf9fd, 0xf9fd, "Convert to uppercase."),
(0xfa01, 0xfa01, "Check for PC relative."),
(0xfa12, 0xfa12, "Go for long index if operand unknown."),
(0xfa25, 0xfa25, "Indirect may not be 5-bit index"),
(0xfa27, 0xfa27, "It's a five-bit index."),
(0xfa5e, 0xfa5e, "Convert to uppercase"),
(0xfa68, 0xfa68, "Scan past the ,PCR"),
(0xfa6e, 0xfa6e, "Set postbyte"),
(0xfa74, 0xfa74, "Set addr mode to PCR"),
(0xfa7a, 0xfa7a, "Scan for one of the 4 index registers and adjust postbyte."),
(0xfa7c, 0xfa7c, "Convert to uppercase."),
(0xfa8c, 0xfa8c, "Index register not found where expected."),
(0xfa92, 0xfa92, "Set index reg bits in postbyte."),
(0xfa98, 0xfa98, "This routine sets amode to 3, if it was less."),
(0xfaa5, 0xfaa5, "This subroutine lays down the address."),
(0xfab7, 0xfab7, "pc rel modes."),
(0xfb0e, 0xfb0e, "This routine checks and lays down short relative address."),
(0xfb32, 0xfb32, "This routine lays down long relative address."),
(0xfb6e, 0xfb6e, "Find register for TFR and PSH instruction"),
(0xfba4, 0xfba4, "This is the code for the A command, assemble instructions. Syntax: Aaddr"),
(0xfbb5, 0xfbb5, "Print address and space."),
(0xfbbc, 0xfbbc, "Get new line"),
(0xfbbf, 0xfbbf, "Exit on empty line."),
(0xfbc4, 0xfbc4, "Make line zero terminated."),
(0xfbce, 0xfbce, "Jump table for monitor routines that are usable by other programs."),
(0xffd2, 0xffd2, "Interrupt vector addresses at top of ROM. Most are vectored through jumps in RAM."),
(0xfff2, 0xfff2, 'SWI3'),
(0xfff4, 0xfff4, 'SWI2'),
(0xfff6, 0xfff6, 'FIRQ'),
(0xfff8, 0xfff8, 'IRQ'),
(0xfffa, 0xfffa, 'SWI'),
(0xfffc, 0xfffc, 'NMI'),
(0xfffe, 0xfffe, 'RESET'),
# manually inserted:
# Memory map of SBC
(0x0, 0x40, "Zero page variables reserved by monitor and O.S."),
(0x40, 0xFF , "Zero page portion for user programs."),
(0x100, 0x17F , "Xmodem buffer 0, terminal input buffer"),
(0x180, 0x1FF , "Xmodem buffer 1, terminal output buffer"),
(0x200, 0x27F , "Terminal input line"),
(0x280, 0x2FF , "Variables reserved by monitor and O.S."),
(0x300, 0x400 , "System stack"),
(0x400, 0x7FFF , "RAM for user programs and data"),
(0x8000, 0xDFFF , "PROM for user programs"),
(0xE000, 0xE1FF , "I/O addresses"),
(0xE200, 0xE3FF , "Reserved"),
(0xE400, 0xFFFF , "Monitor ROM"),
(0xe000, 0xe000, "Control/status port of ACIA"),
(0xe001, 0xe001, "Data port of ACIA"),
)
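    # Editorial note (assumption about the MC6809 BaseMemoryInfo API): each entry is
    # (start_addr, end_addr, description); the base class looks addresses up in these
    # ranges, which is what the __main__ block below demonstrates for the ACIA
    # control/status port at 0xe000.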
def print_out(txt):
print(txt)
def get_sbc09_meminfo():
return SBC09MemInfo(log.debug)
if __name__ == "__main__":
mem_info = SBC09MemInfo(print_out)
mem_info(0xe000) # SERIAL INTERFACE
|
JuhaniImberg/DragonPy
|
dragonpy/sbc09/mem_info.py
|
Python
|
gpl-3.0
| 28,376
|
[
"VisIt"
] |
4134c73d354e4d1e8cf46a9bcc1a468a24b5c533a0a67a9edd551f18ae934015
|
import re
import os
from markdown.util import etree
import logging
log = logging.getLogger(__name__)
import MooseDocs
from markdown.inlinepatterns import Pattern
from MooseCommonExtension import MooseCommonExtension
import utils
class MooseImageFile(MooseCommonExtension, Pattern):
"""
Markdown extension for handling images.
Usage:
!image image_file.png|jpg|etc attribute=setting
All images/media should be stored in docs/media
"""
# Find !image /path/to/file attribute=setting
RE = r'^!image\s+(.*?)(?:$|\s+)(.*)'
def __init__(self, root=None, **kwargs):
MooseCommonExtension.__init__(self)
Pattern.__init__(self, self.RE, **kwargs)
self._root = os.path.join(root, 'docs/media')
        # Valid settings for MOOSE specific documentation features
# All other markdown 'attributes' will be treated as HTML
# style settings
self._settings = {'caption' : None}
def handleMatch(self, match):
"""
process settings associated with !image markdown
"""
rel_filename = match.group(2)
# A tuple separating specific MOOSE documentation features (self._settings) from HTML styles
settings, styles = self.getSettings(match.group(3))
# Read the file and create element
filename = self.checkFilename(rel_filename)
if not filename:
el = self.createErrorElement(rel_filename, message='file not found')
else:
            # When aligning to one side or another, we need to adjust the margins
            # on the opposite side... silly looking but necessary
reverse_margin = { 'left' : 'right',
'right' : 'left',
'None' : 'none'}
el_list = {}
el_list['div'] = etree.Element('div')
el_list['figure'] = etree.SubElement(el_list['div'], 'figure')
el_list['img'] = etree.SubElement(el_list['figure'], 'img')
el_list['img'].set('src', os.path.join('/media', os.path.basename(filename)))
# Set the default figcaption text alignment
el_list['figure'].set('style', 'text-align: left; display: table;')
# Set any extra supplied attributes
if el_list['div'].get('style') != None:
previous_style = el_list['div'].get('style')
else:
previous_style = ''
for attribute in styles.keys():
if attribute == 'align':
el_list['div'].set('style', ';'.join(['text-align: -moz-{}; text-align: -webkit-{};'.format(styles[attribute],
styles[attribute]),
previous_style]))
elif attribute == 'float' and styles[attribute] is not None:
el_list['figure'].set('style', el_list['figure'].get('style') + \
'float: {}; margin-{}: 20px'.format(styles[attribute], reverse_margin[styles[attribute]]))
elif styles[attribute] != None:
el_list['img'].set(attribute, str(styles[attribute]))
# if caption set, add figcaption
if settings['caption'] != None:
# Unset the large default bottom-margin for figcaption
el_list['img'].set('style', 'margin-bottom: unset;')
el_list['figcaption'] = etree.SubElement(el_list['figure'], 'figcaption')
el_list['figcaption'].text = settings['caption']
el = el_list['div']
return el
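# Editorial sketch (not part of MooseDocs): what the RE above captures for a typical
# command. handleMatch reads groups 2 and 3 because Python-Markdown's Pattern class
# wraps RE in an extra leading group when compiling it.
if __name__ == '__main__':
    _m = re.match(MooseImageFile.RE, '!image diagram.png caption=An example figure')
    print(_m.group(1))  # 'diagram.png'               -> relative image path
    print(_m.group(2))  # 'caption=An example figure' -> raw attribute string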
|
katyhuff/moose
|
python/MooseDocs/extensions/MooseImageFile.py
|
Python
|
lgpl-2.1
| 3,724
|
[
"MOOSE"
] |
7c78a1476b281545731f59c15fce9b126e0e1dab5c8ef2e2181d6b3373a3d3a7
|
from __future__ import print_function
import sys
if sys.version_info[0] < 3:
print('This package does not support python2. Try `python3 setup.py`', file=sys.stderr)
sys.exit(1)
import subprocess
from setuptools import setup
from setuptools import find_packages
setup(
name='vaspmd',
version = '0.0',
description = 'Personal scripts for automating MD runs in Vasp',
url = 'https://github.com/ExpHP/vaspmd',
author = 'Michael Lamparski',
author_email = 'lampam@rpi.edu',
entry_points={
'console_scripts':[
'md-init = vaspmd.md_init:main',
'vasp-search = vaspmd.search:main',
],
},
scripts=[
'scripts/md-run',
],
install_requires=[
'pytoml',
],
packages=find_packages(), # include sub-packages
)
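# Usage sketch (assumed typical setuptools workflow, not taken from this repo's docs):
#   pip install -e .     # registers the 'md-init' and 'vasp-search' console scripts
#   md-init --help       # runs vaspmd.md_init:main
#   vasp-search --help   # runs vaspmd.search:main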
|
ExpHP/vaspmd
|
setup.py
|
Python
|
mit
| 730
|
[
"VASP"
] |
41b99d8dc299c99c62aca9dbfd2f23705563cb8ae2a91cc8ebdb7e6e0e868f40
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Deep learning via word2vec's "skip-gram and CBOW models", using either
hierarchical softmax or negative sampling [1]_ [2]_.
The training algorithms were originally ported from the C package https://code.google.com/p/word2vec/
and extended with additional functionality.
For a blog tutorial on gensim word2vec, with an interactive web app trained on GoogleNews, visit http://radimrehurek.com/2014/02/word2vec-tutorial/
**Make sure you have a C compiler before installing gensim, to use optimized (compiled) word2vec training**
(70x speedup compared to plain NumPy implementation [3]_).
Initialize a model with e.g.::
>>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)
Persist a model to disk with::
>>> model.save(fname)
>>> model = Word2Vec.load(fname) # you can continue training with the loaded model!
The model can also be instantiated from an existing file on disk in the word2vec C format::
>>> model = Word2Vec.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
>>> model = Word2Vec.load_word2vec_format('/tmp/vectors.bin', binary=True) # C binary format
You can perform various syntactic/semantic NLP word tasks with the model. Some of them
are already built-in::
>>> model.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
>>> model.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
>>> model.similarity('woman', 'man')
0.73723527
>>> model['computer'] # raw numpy vector of a word
array([-0.00449447, -0.00310097, 0.02421786, ...], dtype=float32)
and so on.
If you're finished training a model (=no more updates, only querying), you can do
>>> model.init_sims(replace=True)
to trim unneeded model memory = use (much) less RAM.
Note that there is a :mod:`gensim.models.phrases` module which lets you automatically
detect phrases longer than one word. Using phrases, you can learn a word2vec model
where "words" are actually multiword expressions, such as `new_york_times` or `financial_crisis`:
>>> bigram_transformer = gensim.models.Phrases(sentences)
>>> model = Word2Vec(bigram_transformer[sentences], size=100, ...)
.. [1] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient Estimation of Word Representations in Vector Space. In Proceedings of Workshop at ICLR, 2013.
.. [2] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. Distributed Representations of Words and Phrases and their Compositionality.
In Proceedings of NIPS, 2013.
.. [3] Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/
"""
from __future__ import division # py3 "true division"
import logging
import sys
import os
import heapq
from timeit import default_timer
from copy import deepcopy
from collections import defaultdict
import threading
import itertools
from gensim.utils import keep_vocab_item
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
from numpy import exp, log, dot, zeros, outer, random, dtype, float32 as REAL,\
uint32, seterr, array, uint8, vstack, fromstring, sqrt, newaxis,\
ndarray, empty, sum as np_sum, prod, ones, ascontiguousarray
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from six import iteritems, itervalues, string_types
from six.moves import xrange
from types import GeneratorType
logger = logging.getLogger("gensim.models.word2vec")
try:
from gensim.models.word2vec_inner import train_sentence_sg, train_sentence_cbow, FAST_VERSION
except ImportError:
# failed... fall back to plain numpy (20-80x slower training than the above)
FAST_VERSION = -1
def train_sentence_sg(model, sentence, alpha, work=None):
"""
Update skip-gram model by training on a single sentence.
The sentence is a list of string tokens, which are looked up in the model's
vocab dictionary. Called internally from `Word2Vec.train()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab and
model.vocab[w].sample_int > model.random.rand() * 2**32]
for pos, word in enumerate(word_vocabs):
reduced_window = model.random.randint(model.window) # `b` in the original word2vec code
# now go over all words from the (reduced) window, predicting each one in turn
start = max(0, pos - model.window + reduced_window)
for pos2, word2 in enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start):
# don't train on the `word` itself
if pos2 != pos:
train_sg_pair(model, model.index2word[word.index], word2.index, alpha)
return len(word_vocabs)
def train_sentence_cbow(model, sentence, alpha, work=None, neu1=None):
"""
Update CBOW model by training on a single sentence.
The sentence is a list of string tokens, which are looked up in the model's
vocab dictionary. Called internally from `Word2Vec.train()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab and
model.vocab[w].sample_int > model.random.rand() * 2**32]
for pos, word in enumerate(word_vocabs):
reduced_window = model.random.randint(model.window) # `b` in the original word2vec code
start = max(0, pos - model.window + reduced_window)
window_pos = enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start)
word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
l1 = np_sum(model.syn0[word2_indices], axis=0) # 1 x vector_size
if word2_indices and model.cbow_mean:
l1 /= len(word2_indices)
train_cbow_pair(model, word, word2_indices, l1, alpha)
return len(word_vocabs)
def train_sg_pair(model, word, context_index, alpha, learn_vectors=True, learn_hidden=True,
context_vectors=None, context_locks=None):
if context_vectors is None:
context_vectors = model.syn0
if context_locks is None:
context_locks = model.syn0_lockf
if word not in model.vocab:
return
predict_word = model.vocab[word] # target word (NN output)
l1 = context_vectors[context_index] # input word (NN input/projection layer)
lock_factor = context_locks[context_index]
neu1e = zeros(l1.shape)
if model.hs:
# work on the entire tree at once, to push as much work into numpy's C routines as possible (performance)
l2a = deepcopy(model.syn1[predict_word.point]) # 2d matrix, codelen x layer1_size
fa = 1.0 / (1.0 + exp(-dot(l1, l2a.T))) # propagate hidden -> output
ga = (1 - predict_word.code - fa) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1[predict_word.point] += outer(ga, l1) # learn hidden -> output
neu1e += dot(ga, l2a) # save error
if model.negative:
# use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
word_indices = [predict_word.index]
while len(word_indices) < model.negative + 1:
w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
if w != predict_word.index:
word_indices.append(w)
l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
fb = 1. / (1. + exp(-dot(l1, l2b.T))) # propagate hidden -> output
gb = (model.neg_labels - fb) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
neu1e += dot(gb, l2b) # save error
if learn_vectors:
l1 += neu1e * lock_factor # learn input -> hidden (mutates model.syn0[word2.index], if that is l1)
return neu1e
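# Editorial note: both branches above share one update shape: f = sigmoid(dot(l1, l2)),
# g = (label - f) * alpha; the output rows move by outer(g, l1) and the error fed
# back to the input vector is dot(g, l2). Only the choice of output rows differs:
# the Huffman path (hs) versus the target word plus sampled noise words (negative).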
def train_cbow_pair(model, word, input_word_indices, l1, alpha, learn_vectors=True, learn_hidden=True):
neu1e = zeros(l1.shape)
if model.hs:
l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size
fa = 1. / (1. + exp(-dot(l1, l2a.T))) # propagate hidden -> output
ga = (1. - word.code - fa) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1[word.point] += outer(ga, l1) # learn hidden -> output
neu1e += dot(ga, l2a) # save error
if model.negative:
# use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
word_indices = [word.index]
while len(word_indices) < model.negative + 1:
w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
if w != word.index:
word_indices.append(w)
l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
fb = 1. / (1. + exp(-dot(l1, l2b.T))) # propagate hidden -> output
gb = (model.neg_labels - fb) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
neu1e += dot(gb, l2b) # save error
if learn_vectors:
# learn input -> hidden, here for all words in the window separately
if not model.cbow_mean and input_word_indices:
neu1e /= len(input_word_indices)
for i in input_word_indices:
model.syn0[i] += neu1e * model.syn0_lockf[i]
return neu1e
# could move this import up to where train_* is imported,
# but for now just do it separately in case there are unforeseen bugs in score_
try:
from gensim.models.word2vec_inner import score_sentence_sg, score_sentence_cbow
except ImportError:
def score_sentence_sg(model, sentence, work=None):
"""
        Obtain likelihood score for a single sentence in a fitted skip-gram representation.
The sentence is a list of Vocab objects (or None, when the corresponding
word is not in the vocabulary). Called internally from `Word2Vec.score()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
log_prob_sentence = 0.0
if model.negative:
raise RuntimeError("scoring is only available for HS=True")
word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab]
for pos, word in enumerate(word_vocabs):
if word is None:
continue # OOV word in the input sentence => skip
# now go over all words from the window, predicting each one in turn
start = max(0, pos - model.window)
for pos2, word2 in enumerate(sentence[start:(pos + model.window + 1)], start):
# don't train on OOV words and on the `word` itself
if word2 and not (pos2 == pos):
log_prob_sentence += score_sg_pair(model, word, word2)
return log_prob_sentence
def score_sentence_cbow(model, sentence, alpha, work=None, neu1=None):
"""
        Obtain likelihood score for a single sentence in a fitted CBOW representation.
        The sentence is a list of Vocab objects (or None, where the corresponding
        word is not in the vocabulary). Called internally from `Word2Vec.score()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
log_prob_sentence = 0.0
if model.negative:
raise RuntimeError("scoring is only available for HS=True")
word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab]
for pos, word in enumerate(word_vocabs):
if word is None:
continue # OOV word in the input sentence => skip
start = max(0, pos - model.window)
window_pos = enumerate(sentence[start:(pos + model.window + 1)], start)
word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
l1 = np_sum(model.syn0[word2_indices], axis=0) # 1 x layer1_size
if word2_indices and model.cbow_mean:
l1 /= len(word2_indices)
log_prob_sentence += score_cbow_pair(model, word, word2_indices, l1)
return log_prob_sentence
def score_sg_pair(model, word, word2):
l1 = model.syn0[word2.index]
l2a = deepcopy(model.syn1[word.point]) # 2d matrix, codelen x layer1_size
    sgn = (-1.0)**word.code  # ch function, 0 -> 1, 1 -> -1
lprob = -log(1.0 + exp(-sgn*dot(l1, l2a.T)))
return sum(lprob)
def score_cbow_pair(model, word, word2_indices, l1):
l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size
    sgn = (-1.0)**word.code  # ch function, 0 -> 1, 1 -> -1
lprob = -log(1.0 + exp(-sgn*dot(l1, l2a.T)))
return sum(lprob)
class Vocab(object):
"""
A single vocabulary item, used internally for collecting per-word frequency/sampling info,
and for constructing binary trees (incl. both word leaves and inner nodes).
"""
def __init__(self, **kwargs):
self.count = 0
self.__dict__.update(kwargs)
def __lt__(self, other): # used for sorting in a priority queue
return self.count < other.count
def __str__(self):
vals = ['%s:%r' % (key, self.__dict__[key]) for key in sorted(self.__dict__) if not key.startswith('_')]
return "%s(%s)" % (self.__class__.__name__, ', '.join(vals))
class Word2Vec(utils.SaveLoad):
"""
Class for training, using and evaluating neural networks described in https://code.google.com/p/word2vec/
The model can be stored/loaded via its `save()` and `load()` methods, or stored/loaded in a format
compatible with the original word2vec implementation via `save_word2vec_format()` and `load_word2vec_format()`.
"""
def __init__(
self, sentences=None, size=100, alpha=0.025, window=5, min_count=5,
max_vocab_size=None, sample=0, seed=1, workers=1, min_alpha=0.0001,
sg=1, hs=1, negative=0, cbow_mean=0, hashfxn=hash, iter=1, null_word=0,
trim_rule=None, sorted_vocab=1):
"""
Initialize the model from an iterable of `sentences`. Each sentence is a
list of words (unicode strings) that will be used for training.
The `sentences` iterable can be simply a list, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`BrownCorpus`, :class:`Text8Corpus` or :class:`LineSentence` in
this module for such examples.
If you don't supply `sentences`, the model is left uninitialized -- use if
you plan to initialize it in some other way.
`sg` defines the training algorithm. By default (`sg=1`), skip-gram is used.
Otherwise, `cbow` is employed.
`size` is the dimensionality of the feature vectors.
`window` is the maximum distance between the current and predicted word within a sentence.
`alpha` is the initial learning rate (will linearly drop to zero as training progresses).
`seed` = for the random number generator. Initial vectors for each
word are seeded with a hash of the concatenation of word + str(seed).
`min_count` = ignore all words with total frequency lower than this.
`max_vocab_size` = limit RAM during vocabulary building; if there are more unique
words than this, then prune the infrequent ones. Every 10 million word types
need about 1GB of RAM. Set to `None` for no limit (default).
`sample` = threshold for configuring which higher-frequency words are randomly downsampled;
default is 0 (off), useful value is 1e-5.
`workers` = use this many worker threads to train the model (=faster training with multicore machines).
        `hs` = if 1 (default), hierarchical softmax will be used for model training (else set to 0).
`negative` = if > 0, negative sampling will be used, the int for negative
specifies how many "noise words" should be drawn (usually between 5-20).
`cbow_mean` = if 0 (default), use the sum of the context word vectors. If 1, use the mean.
Only applies when cbow is used.
`hashfxn` = hash function to use to randomly initialize weights, for increased
training reproducibility. Default is Python's rudimentary built in hash function.
`iter` = number of iterations (epochs) over the corpus.
`trim_rule` = vocabulary trimming rule, specifies whether certain words should remain
in the vocabulary, be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used), or a callable that accepts parameters (word, count, min_count) and
returns either util.RULE_DISCARD, util.RULE_KEEP or util.RULE_DEFAULT.
        Note: The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as part
of the model.
`sorted_vocab` = if 1 (default), sort the vocabulary by descending frequency before
assigning word indexes.
"""
self.vocab = {} # mapping from a word (string) to a Vocab object
self.index2word = [] # map from a word's matrix index (int) to word (string)
self.sg = int(sg)
self.cum_table = None # for negative sampling
self.vector_size = int(size)
self.layer1_size = int(size)
if size % 4 != 0:
logger.warning("consider setting layer size to a multiple of 4 for greater performance")
self.alpha = float(alpha)
self.window = int(window)
self.max_vocab_size = max_vocab_size
self.seed = seed
self.random = random.RandomState(seed)
self.min_count = min_count
self.sample = sample
self.workers = workers
self.min_alpha = min_alpha
self.hs = hs
self.negative = negative
self.cbow_mean = int(cbow_mean)
self.hashfxn = hashfxn
self.iter = iter
self.null_word = null_word
self.train_count = 0
self.total_train_time = 0
self.sorted_vocab = sorted_vocab
if sentences is not None:
if isinstance(sentences, GeneratorType):
raise TypeError("You can't pass a generator as the sentences argument. Try an iterator.")
self.build_vocab(sentences, trim_rule=trim_rule)
self.train(sentences)
def make_cum_table(self, power=0.75, domain=2**31 - 1):
"""
Create a cumulative-distribution table using stored vocabulary word counts for
drawing random words in the negative-sampling training routines.
To draw a word index, choose a random integer up to the maximum value in the
        table (cum_table[-1]), then find that integer's sorted insertion point
(as if by bisect_left or ndarray.searchsorted()). That insertion point is the
drawn index, coming up in proportion equal to the increment at that slot.
Called internally from 'build_vocab()'.
"""
vocab_size = len(self.index2word)
self.cum_table = zeros(vocab_size, dtype=uint32)
# compute sum of all power (Z in paper)
train_words_pow = float(sum([self.vocab[word].count**power for word in self.vocab]))
cumulative = 0.0
for word_index in range(vocab_size):
cumulative += self.vocab[self.index2word[word_index]].count**power / train_words_pow
self.cum_table[word_index] = round(cumulative * domain)
if len(self.cum_table) > 0:
assert self.cum_table[-1] == domain
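    # Editorial sketch (hypothetical numbers): with domain=100 and three words whose
    # count**power shares are 0.6, 0.3 and 0.1, cum_table becomes [60, 90, 100];
    # searchsorted maps a random r in [0, 99] to a word index (r = 0..60 -> word 0,
    # 61..90 -> word 1, 91..99 -> word 2), i.e. it draws words in proportion to the
    # smoothed counts up to rounding at the boundaries.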
def create_binary_tree(self):
"""
Create a binary Huffman tree using stored vocabulary word counts. Frequent words
will have shorter binary codes. Called internally from `build_vocab()`.
"""
logger.info("constructing a huffman tree from %i words", len(self.vocab))
# build the huffman tree
heap = list(itervalues(self.vocab))
heapq.heapify(heap)
for i in xrange(len(self.vocab) - 1):
min1, min2 = heapq.heappop(heap), heapq.heappop(heap)
heapq.heappush(heap, Vocab(count=min1.count + min2.count, index=i + len(self.vocab), left=min1, right=min2))
# recurse over the tree, assigning a binary code to each vocabulary word
if heap:
max_depth, stack = 0, [(heap[0], [], [])]
while stack:
node, codes, points = stack.pop()
if node.index < len(self.vocab):
# leaf node => store its path from the root
node.code, node.point = codes, points
max_depth = max(len(codes), max_depth)
else:
# inner node => continue recursion
points = array(list(points) + [node.index - len(self.vocab)], dtype=uint32)
stack.append((node.left, array(list(codes) + [0], dtype=uint8), points))
stack.append((node.right, array(list(codes) + [1], dtype=uint8), points))
logger.info("built huffman tree with maximum node depth %i", max_depth)
def build_vocab(self, sentences, keep_raw_vocab=False, trim_rule=None):
"""
Build vocabulary from a sequence of sentences (can be a once-only generator stream).
Each sentence must be a list of unicode strings.
"""
self.scan_vocab(sentences, trim_rule=trim_rule) # initial survey
self.scale_vocab(keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule) # trim by min_count & precalculate downsampling
self.finalize_vocab() # build tables & arrays
def scan_vocab(self, sentences, progress_per=10000, trim_rule=None):
"""Do an initial scan of all words appearing in sentences."""
logger.info("collecting all words and their counts")
sentence_no = -1
total_words = 0
min_reduce = 1
vocab = defaultdict(int)
for sentence_no, sentence in enumerate(sentences):
if sentence_no % progress_per == 0:
logger.info("PROGRESS: at sentence #%i, processed %i words, keeping %i word types",
sentence_no, sum(itervalues(vocab)) + total_words, len(vocab))
for word in sentence:
vocab[word] += 1
if self.max_vocab_size and len(vocab) > self.max_vocab_size:
total_words += utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)
min_reduce += 1
total_words += sum(itervalues(vocab))
logger.info("collected %i word types from a corpus of %i raw words and %i sentences",
len(vocab), total_words, sentence_no + 1)
self.corpus_count = sentence_no + 1
self.raw_vocab = vocab
def scale_vocab(self, min_count=None, sample=None, dry_run=False, keep_raw_vocab=False, trim_rule=None):
"""
Apply vocabulary settings for `min_count` (discarding less-frequent words)
and `sample` (controlling the downsampling of more-frequent words).
Calling with `dry_run=True` will only simulate the provided settings and
report the size of the retained vocabulary, effective corpus length, and
estimated memory requirements. Results are both printed via logging and
returned as a dict.
Delete the raw vocabulary after the scaling is done to free up RAM,
unless `keep_raw_vocab` is set.
"""
min_count = min_count or self.min_count
sample = sample or self.sample
# Discard words less-frequent than min_count
if not dry_run:
self.index2word = []
# make stored settings match these applied settings
self.min_count = min_count
self.sample = sample
self.vocab = {}
drop_unique, drop_total, retain_total, original_total = 0, 0, 0, 0
retain_words = []
for word, v in iteritems(self.raw_vocab):
if keep_vocab_item(word, v, min_count, trim_rule=trim_rule):
retain_words.append(word)
retain_total += v
original_total += v
if not dry_run:
self.vocab[word] = Vocab(count=v, index=len(self.index2word))
self.index2word.append(word)
else:
drop_unique += 1
drop_total += v
original_total += v
logger.info("min_count=%d retains %i unique words (drops %i)",
min_count, len(retain_words), drop_unique)
logger.info("min_count leaves %i word corpus (%i%% of original %i)",
retain_total, retain_total * 100 / max(original_total, 1), original_total)
# Precalculate each vocabulary item's threshold for sampling
if not sample:
# no words downsampled
threshold_count = retain_total
elif sample < 1.0:
# traditional meaning: set parameter as proportion of total
threshold_count = sample * retain_total
else:
# new shorthand: sample >= 1 means downsample all words with higher count than sample
threshold_count = int(sample * (3 + sqrt(5)) / 2)
downsample_total, downsample_unique = 0, 0
for w in retain_words:
v = self.raw_vocab[w]
word_probability = (sqrt(v / threshold_count) + 1) * (threshold_count / v)
if word_probability < 1.0:
downsample_unique += 1
downsample_total += word_probability * v
else:
word_probability = 1.0
downsample_total += v
if not dry_run:
self.vocab[w].sample_int = int(round(word_probability * 2**32))
if not dry_run and not keep_raw_vocab:
logger.info("deleting the raw counts dictionary of %i items", len(self.raw_vocab))
self.raw_vocab = defaultdict(int)
logger.info("sample=%g downsamples %i most-common words", sample, downsample_unique)
logger.info("downsampling leaves estimated %i word corpus (%.1f%% of prior %i)",
downsample_total, downsample_total * 100.0 / max(retain_total, 1), retain_total)
# return from each step: words-affected, resulting-corpus-size
report_values = {'drop_unique': drop_unique, 'retain_total': retain_total,
'downsample_unique': downsample_unique, 'downsample_total': int(downsample_total)}
# print extra memory estimates
report_values['memory'] = self.estimate_memory(vocab_size=len(retain_words))
return report_values
def finalize_vocab(self):
"""Build tables and model weights based on final vocabulary settings."""
if not self.index2word:
self.scale_vocab()
if self.sorted_vocab:
self.sort_vocab()
if self.hs:
# add info about each word's Huffman encoding
self.create_binary_tree()
if self.negative:
# build the table for drawing random words (for negative sampling)
self.make_cum_table()
if self.null_word:
# create null pseudo-word for padding when using concatenative L1 (run-of-words)
# this word is only ever input – never predicted – so count, huffman-point, etc doesn't matter
word, v = '\0', Vocab(count=1, sample_int=0)
v.index = len(self.vocab)
self.index2word.append(word)
self.vocab[word] = v
# set initial input/projection and hidden weights
self.reset_weights()
def sort_vocab(self):
"""Sort the vocabulary so the most frequent words have the lowest indexes."""
if hasattr(self, 'syn0'):
raise RuntimeError("must sort before initializing vectors/weights")
self.index2word.sort(key=lambda word: self.vocab[word].count, reverse=True)
for i, word in enumerate(self.index2word):
self.vocab[word].index = i
def reset_from(self, other_model):
"""
Borrow shareable pre-built structures (like vocab) from the other_model. Useful
if testing multiple models in parallel on the same corpus.
"""
self.vocab = other_model.vocab
self.index2word = other_model.index2word
self.cum_table = other_model.cum_table
self.corpus_count = other_model.corpus_count
self.reset_weights()
def _do_train_job(self, job, alpha, inits):
work, neu1 = inits
tally = 0
raw_tally = 0
for sentence in job:
if self.sg:
tally += train_sentence_sg(self, sentence, alpha, work)
else:
tally += train_sentence_cbow(self, sentence, alpha, work, neu1)
raw_tally += len(sentence)
return (tally, raw_tally)
def _raw_word_count(self, items):
return sum(len(item) for item in items)
def train(self, sentences, total_words=None, word_count=0, chunksize=100, total_examples=None, queue_factor=2, report_delay=1):
"""
Update the model's neural weights from a sequence of sentences (can be a once-only generator stream).
For Word2Vec, each sentence must be a list of unicode strings. (Subclasses may accept other examples.)
To support linear learning-rate decay from (initial) alpha to min_alpha, either total_examples
(count of sentences) or total_words (count of raw words in sentences) should be provided, unless the
sentences are the same as those that were used to initially build the vocabulary.
"""
if FAST_VERSION < 0:
import warnings
warnings.warn("C extension not loaded for Word2Vec, training will be slow. "
"Install a C compiler and reinstall gensim for fast training.")
self.neg_labels = []
if self.negative > 0:
# precompute negative labels optimization for pure-python training
self.neg_labels = zeros(self.negative + 1)
self.neg_labels[0] = 1.
logger.info(
"training model with %i workers on %i vocabulary and %i features, "
"using sg=%s hs=%s sample=%s and negative=%s",
self.workers, len(self.vocab), self.layer1_size, self.sg,
self.hs, self.sample, self.negative)
if not self.vocab:
raise RuntimeError("you must first build vocabulary before training the model")
if not hasattr(self, 'syn0'):
raise RuntimeError("you must first finalize vocabulary before training the model")
if total_words is None and total_examples is None:
if self.corpus_count:
total_examples = self.corpus_count
logger.info("expecting %i examples, matching count from corpus used for vocabulary survey", total_examples)
else:
raise ValueError("you must provide either total_words or total_examples, to enable alpha and progress calculations")
if self.iter > 1:
sentences = utils.RepeatCorpusNTimes(sentences, self.iter)
total_words = total_words and total_words * self.iter
total_examples = total_examples and total_examples * self.iter
def worker_init():
work = matutils.zeros_aligned(self.layer1_size, dtype=REAL) # per-thread private work memory
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
return (work, neu1)
def worker_one_job(job, inits):
items, alpha = job
if items is None: # signal to finish
return False
# train & return tally
tally, raw_tally = self._do_train_job(items, alpha, inits)
progress_queue.put((len(items), tally, raw_tally)) # report progress
return True
def worker_loop():
"""Train the model, lifting lists of sentences from the jobs queue."""
init = worker_init()
while True:
job = job_queue.get()
if not worker_one_job(job, init):
break
start, next_report = default_timer(), 1.0
# buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
if self.workers > 0:
job_queue = Queue(maxsize=queue_factor * self.workers)
else:
job_queue = FakeJobQueue(worker_init, worker_one_job)
progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)
workers = [threading.Thread(target=worker_loop) for _ in xrange(self.workers)]
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
pushed_words = 0
pushed_examples = 0
example_count = 0
trained_word_count = 0
raw_word_count = word_count
push_done = False
done_jobs = 0
next_alpha = self.alpha
jobs_source = enumerate(utils.grouper(sentences, chunksize))
job_no = -1
# fill jobs queue with (sentence, alpha) job tuples
while True:
try:
job_no, items = next(jobs_source)
logger.debug("putting job #%i in the queue at alpha %.05f", job_no, next_alpha)
job_queue.put((items, next_alpha))
# update the learning rate before every next job
if self.min_alpha < next_alpha:
if total_examples:
# examples-based decay
pushed_examples += len(items)
next_alpha = self.alpha - (self.alpha - self.min_alpha) * (pushed_examples / total_examples)
else:
# words-based decay
pushed_words += self._raw_word_count(items)
next_alpha = self.alpha - (self.alpha - self.min_alpha) * (pushed_words / total_words)
next_alpha = max(next_alpha, self.min_alpha)
except StopIteration:
if job_no == -1 and self.train_count == 0:
logger.warning(
"train() called with empty iterator (if not intended, "
"be sure to provide a corpus that offers restartable "
"iteration)."
)
logger.info(
"reached end of input; waiting to finish %i outstanding jobs",
job_no - done_jobs + 1)
for _ in xrange(self.workers):
job_queue.put((None, 0)) # give the workers heads up that they can finish -- no more work!
push_done = True
try:
while done_jobs < (job_no+1) or not push_done:
examples, trained_words, raw_words = progress_queue.get(push_done) # only block after all jobs pushed
example_count += examples
trained_word_count += trained_words # only words in vocab & sampled
raw_word_count += raw_words
done_jobs += 1
elapsed = default_timer() - start
if elapsed >= next_report:
if total_examples:
# examples-based progress %
logger.info(
"PROGRESS: at %.2f%% examples, %.0f words/s",
100.0 * example_count / total_examples, trained_word_count / elapsed)
else:
# words-based progress %
logger.info(
"PROGRESS: at %.2f%% words, %.0f words/s",
100.0 * raw_word_count / total_words, trained_word_count / elapsed)
next_report = elapsed + report_delay # don't flood log, wait report_delay seconds
else:
# loop ended by job count; really done
break
except Empty:
pass # already out of loop; continue to next push
elapsed = default_timer() - start
logger.info(
"training on %i raw words took %.1fs, %.0f trained words/s",
raw_word_count, elapsed, trained_word_count / elapsed if elapsed else 0.0)
if total_examples and total_examples != example_count:
logger.warn("supplied example count (%i) did not equal expected count (%i)", example_count, total_examples)
if total_words and total_words != raw_word_count:
logger.warn("supplied raw word count (%i) did not equal expected count (%i)", raw_word_count, total_words)
self.train_count += 1 # number of times train() has been called
self.total_train_time += elapsed
self.clear_sims()
return trained_word_count
def _score_job_words(self, sentence, inits):
work, neu1 = inits
if self.sg:
return score_sentence_sg(self, sentence, work)
else:
return score_sentence_cbow(self, sentence, work, neu1)
# basics copied from the train() function
def score(self, sentences, total_sentences=int(1e9), chunksize=100, queue_factor=2, report_delay=1):
"""
Score the log probability for a sequence of sentences (can be a once-only generator stream).
Each sentence must be a list of unicode strings.
This does not change the fitted model in any way (see Word2Vec.train() for that)
        Note that you should specify `total_sentences`; we'll run into problems if you ask to score more sentences than this value (default one billion).
See the article by Taddy [taddy]_ for examples of how to use such scores in document classification.
.. [taddy] Taddy, Matt. Document Classification by Inversion of Distributed Language Representations, in Proceedings of the 2015 Conference of the Association of Computational Linguistics.
"""
if FAST_VERSION < 0:
import warnings
warnings.warn("C extension compilation failed, scoring will be slow. "
"Install a C compiler and reinstall gensim for fastness.")
logger.info(
"scoring sentences with %i workers on %i vocabulary and %i features, "
"using sg=%s hs=%s sample=%s and negative=%s",
self.workers, len(self.vocab), self.layer1_size, self.sg, self.hs, self.sample, self.negative)
if not self.vocab:
raise RuntimeError("you must first build vocabulary before scoring new data")
if not self.hs:
raise RuntimeError("we have only implemented score for hs")
def worker_init():
work = zeros(1, dtype=REAL) # for sg hs, we actually only need one memory loc (running sum)
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
return (work, neu1)
def worker_one_job(job, inits):
if job is None: # signal to finish
return False
ns = 0
for (id, sentence) in job:
sentence_scores[id] = self._score_job_words(sentence, inits)
ns += 1
progress_queue.put(ns) # report progress
return True
def worker_loop():
"""Train the model, lifting lists of sentences from the jobs queue."""
init = worker_init()
while True:
job = job_queue.get()
if not worker_one_job(job, init):
break
start, next_report = default_timer(), 1.0
# buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
if self.workers > 0:
job_queue = Queue(maxsize=queue_factor * self.workers)
else:
job_queue = FakeJobQueue(worker_init, worker_one_job)
progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)
workers = [threading.Thread(target=worker_loop) for _ in xrange(self.workers)]
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
sentence_count = 0
sentence_scores = matutils.zeros_aligned(total_sentences, dtype=REAL)
push_done = False
done_jobs = 0
jobs_source = enumerate(utils.grouper(enumerate(sentences), chunksize))
# fill jobs queue with (id, sentence) job items
while True:
try:
job_no, items = next(jobs_source)
logger.debug("putting job #%i in the queue", job_no)
job_queue.put(items)
except StopIteration:
logger.info(
"reached end of input; waiting to finish %i outstanding jobs",
job_no - done_jobs + 1)
for _ in xrange(self.workers):
job_queue.put(None) # give the workers heads up that they can finish -- no more work!
push_done = True
try:
while done_jobs < (job_no+1) or not push_done:
ns = progress_queue.get(push_done) # only block after all jobs pushed
sentence_count += ns
done_jobs += 1
elapsed = default_timer() - start
if elapsed >= next_report:
logger.info(
"PROGRESS: at %.2f%% sentences, %.0f sentences/s",
                            100.0 * sentence_count / total_sentences, sentence_count / elapsed)
next_report = elapsed + report_delay # don't flood log, wait report_delay seconds
else:
# loop ended by job count; really done
break
except Empty:
pass # already out of loop; continue to next push
elapsed = default_timer() - start
self.clear_sims()
logger.info("scoring %i sentences took %.1fs, %.0f sentences/s"
% (sentence_count, elapsed, sentence_count / elapsed if elapsed else 0.0))
return sentence_scores[:sentence_count]
def clear_sims(self):
self.syn0norm = None
def reset_weights(self):
"""Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary."""
logger.info("resetting layer weights")
self.syn0 = empty((len(self.vocab), self.vector_size), dtype=REAL)
# randomize weights vector by vector, rather than materializing a huge random matrix in RAM at once
for i in xrange(len(self.vocab)):
# construct deterministic seed from word AND seed argument
self.syn0[i] = self.seeded_vector(self.index2word[i] + str(self.seed))
if self.hs:
self.syn1 = zeros((len(self.vocab), self.layer1_size), dtype=REAL)
if self.negative:
self.syn1neg = zeros((len(self.vocab), self.layer1_size), dtype=REAL)
self.syn0norm = None
self.syn0_lockf = ones(len(self.vocab), dtype=REAL) # zeros suppress learning
def seeded_vector(self, seed_string):
"""Create one 'random' vector (but deterministic by seed_string)"""
# Note: built-in hash() may vary by Python version or even (in Py3.x) per launch
once = random.RandomState(uint32(self.hashfxn(seed_string)))
return (once.rand(self.vector_size) - 0.5) / self.vector_size
def save_word2vec_format(self, fname, fvocab=None, binary=False):
"""
Store the input-hidden weight matrix in the same format used by the original
C word2vec-tool, for compatibility.
"""
if fvocab is not None:
logger.info("storing vocabulary in %s" % (fvocab))
with utils.smart_open(fvocab, 'wb') as vout:
for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count):
vout.write(utils.to_utf8("%s %s\n" % (word, vocab.count)))
logger.info("storing %sx%s projection weights into %s" % (len(self.vocab), self.vector_size, fname))
assert (len(self.vocab), self.vector_size) == self.syn0.shape
with utils.smart_open(fname, 'wb') as fout:
fout.write(utils.to_utf8("%s %s\n" % self.syn0.shape))
# store in sorted order: most frequent words at the top
for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count):
row = self.syn0[vocab.index]
if binary:
fout.write(utils.to_utf8(word) + b" " + row.tostring())
else:
fout.write(utils.to_utf8("%s %s\n" % (word, ' '.join("%f" % val for val in row))))
@classmethod
def load_word2vec_format(cls, fname, fvocab=None, binary=False, norm_only=True, encoding='utf8', unicode_errors='strict'):
"""
Load the input-hidden weight matrix from the original C word2vec-tool format.
Note that the information stored in the file is incomplete (the binary tree is missing),
so while you can query for word similarity etc., you cannot continue training
with a model loaded this way.
`binary` is a boolean indicating whether the data is in binary word2vec format.
`norm_only` is a boolean indicating whether to only store normalised word2vec vectors in memory.
Word counts are read from `fvocab` filename, if set (this is the file generated
by `-save-vocab` flag of the original C tool).
If you trained the C model using non-utf8 encoding for words, specify that
encoding in `encoding`.
"""
counts = None
if fvocab is not None:
logger.info("loading word counts from %s", fvocab)
counts = {}
with utils.smart_open(fvocab) as fin:
for line in fin:
word, count = utils.to_unicode(line).strip().split()
counts[word] = int(count)
logger.info("loading projection weights from %s", fname)
with utils.smart_open(fname) as fin:
header = utils.to_unicode(fin.readline(), encoding=encoding)
vocab_size, vector_size = map(int, header.split()) # throws for invalid file format
result = cls(size=vector_size)
result.syn0 = zeros((vocab_size, vector_size), dtype=REAL)
def add_word(word, weights):
word_id = len(result.vocab)
if word in result.vocab:
logger.warning("duplicate word '%s' in %s, ignoring all but first", word, fname)
return
if counts is None:
# most common scenario: no vocab file given. just make up some bogus counts, in descending order
result.vocab[word] = Vocab(index=word_id, count=vocab_size - word_id)
elif word in counts:
# use count from the vocab file
result.vocab[word] = Vocab(index=word_id, count=counts[word])
else:
# vocab file given, but word is missing -- set count to None (TODO: or raise?)
logger.warning("vocabulary file is incomplete: '%s' is missing", word)
result.vocab[word] = Vocab(index=word_id, count=None)
result.syn0[word_id] = weights
result.index2word.append(word)
if binary:
binary_len = dtype(REAL).itemsize * vector_size
for line_no in xrange(vocab_size):
# mixed text and binary: read text first, then binary
word = []
while True:
ch = fin.read(1)
if ch == b' ':
break
if ch != b'\n': # ignore newlines in front of words (some binary files have)
word.append(ch)
word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)
weights = fromstring(fin.read(binary_len), dtype=REAL)
add_word(word, weights)
else:
for line_no, line in enumerate(fin):
parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
if len(parts) != vector_size + 1:
raise ValueError("invalid vector on line %s (is this really the text format?)" % (line_no))
word, weights = parts[0], list(map(REAL, parts[1:]))
add_word(word, weights)
if result.syn0.shape[0] != len(result.vocab):
logger.info(
"duplicate words detected, shrinking matrix size from %i to %i",
result.syn0.shape[0], len(result.vocab)
)
result.syn0 = ascontiguousarray(result.syn0[: len(result.vocab)])
assert (len(result.vocab), result.vector_size) == result.syn0.shape
logger.info("loaded %s matrix from %s" % (result.syn0.shape, fname))
result.init_sims(norm_only)
return result
def intersect_word2vec_format(self, fname, binary=False, encoding='utf8', unicode_errors='strict'):
"""
Merge the input-hidden weight matrix from the original C word2vec-tool format
given, where it intersects with the current vocabulary. (No words are added to the
existing vocabulary, but intersecting words adopt the file's weights, and
non-intersecting words are left alone.)
`binary` is a boolean indicating whether the data is in binary word2vec format.
"""
overlap_count = 0
logger.info("loading projection weights from %s" % (fname))
with utils.smart_open(fname) as fin:
header = utils.to_unicode(fin.readline(), encoding=encoding)
vocab_size, vector_size = map(int, header.split()) # throws for invalid file format
if not vector_size == self.vector_size:
raise ValueError("incompatible vector size %d in file %s" % (vector_size, fname))
# TOCONSIDER: maybe mismatched vectors still useful enough to merge (truncating/padding)?
if binary:
binary_len = dtype(REAL).itemsize * vector_size
for line_no in xrange(vocab_size):
# mixed text and binary: read text first, then binary
word = []
while True:
ch = fin.read(1)
if ch == b' ':
break
if ch != b'\n': # ignore newlines in front of words (some binary files have)
word.append(ch)
word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)
weights = fromstring(fin.read(binary_len), dtype=REAL)
if word in self.vocab:
overlap_count += 1
self.syn0[self.vocab[word].index] = weights
self.syn0_lockf[self.vocab[word].index] = 0.0 # lock it
else:
for line_no, line in enumerate(fin):
parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
if len(parts) != vector_size + 1:
raise ValueError("invalid vector on line %s (is this really the text format?)" % (line_no))
word, weights = parts[0], list(map(REAL, parts[1:]))
if word in self.vocab:
overlap_count += 1
self.syn0[self.vocab[word].index] = weights
logger.info("merged %d vectors into %s matrix from %s" % (overlap_count, self.syn0.shape, fname))
def most_similar(self, positive=[], negative=[], topn=10, restrict_vocab=None):
"""
Find the top-N most similar words. Positive words contribute positively towards the
similarity, negative words negatively.
This method computes cosine similarity between a simple mean of the projection
weight vectors of the given words and the vectors for each word in the model.
The method corresponds to the `word-analogy` and `distance` scripts in the original
word2vec implementation.
If topn is False, most_similar returns the vector of similarity scores.
`restrict_vocab` is an optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Example::
>>> trained_model.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
"""
self.init_sims()
if isinstance(positive, string_types) and not negative:
# allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
positive = [positive]
# add weights for each word, if not already present; default to 1.0 for positive and -1.0 for negative words
positive = [
(word, 1.0) if isinstance(word, string_types + (ndarray,)) else word
for word in positive
]
negative = [
(word, -1.0) if isinstance(word, string_types + (ndarray,)) else word
for word in negative
]
# compute the weighted average of all words
all_words, mean = set(), []
for word, weight in positive + negative:
if isinstance(word, ndarray):
mean.append(weight * word)
elif word in self.vocab:
mean.append(weight * self.syn0norm[self.vocab[word].index])
all_words.add(self.vocab[word].index)
else:
raise KeyError("word '%s' not in vocabulary" % word)
if not mean:
raise ValueError("cannot compute similarity with no input")
mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
limited = self.syn0norm if restrict_vocab is None else self.syn0norm[:restrict_vocab]
dists = dot(limited, mean)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
# ignore (don't return) words from the input
result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
return result[:topn]
def most_similar_cosmul(self, positive=[], negative=[], topn=10):
"""
Find the top-N most similar words, using the multiplicative combination objective
proposed by Omer Levy and Yoav Goldberg in [4]_. Positive words still contribute
positively towards the similarity, negative words negatively, but with less
susceptibility to one large distance dominating the calculation.
In the common analogy-solving case, of two positive and one negative examples,
this method is equivalent to the "3CosMul" objective (equation (4)) of Levy and Goldberg.
Additional positive or negative examples contribute to the numerator or denominator,
respectively – a potentially sensible but untested extension of the method. (With
a single positive example, rankings will be the same as in the default most_similar.)
Example::
>>> trained_model.most_similar_cosmul(positive=['baghdad', 'england'], negative=['london'])
[(u'iraq', 0.8488819003105164), ...]
.. [4] Omer Levy and Yoav Goldberg. Linguistic Regularities in Sparse and Explicit Word Representations, 2014.
"""
self.init_sims()
if isinstance(positive, string_types) and not negative:
# allow calls like most_similar_cosmul('dog'), as a shorthand for most_similar_cosmul(['dog'])
positive = [positive]
all_words = set()
def word_vec(word):
if isinstance(word, ndarray):
return word
elif word in self.vocab:
all_words.add(self.vocab[word].index)
return self.syn0norm[self.vocab[word].index]
else:
raise KeyError("word '%s' not in vocabulary" % word)
positive = [word_vec(word) for word in positive]
negative = [word_vec(word) for word in negative]
if not positive:
raise ValueError("cannot compute similarity with no input")
# equation (4) of Levy & Goldberg "Linguistic Regularities...",
# with distances shifted to [0,1] per footnote (7)
pos_dists = [((1 + dot(self.syn0norm, term)) / 2) for term in positive]
neg_dists = [((1 + dot(self.syn0norm, term)) / 2) for term in negative]
dists = prod(pos_dists, axis=0) / (prod(neg_dists, axis=0) + 0.000001)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
# ignore (don't return) words from the input
result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
return result[:topn]
def doesnt_match(self, words):
"""
Which word from the given list doesn't go with the others?
Example::
>>> trained_model.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
"""
self.init_sims()
words = [word for word in words if word in self.vocab] # filter out OOV words
logger.debug("using words %s" % words)
if not words:
raise ValueError("cannot select a word from an empty list")
vectors = vstack(self.syn0norm[self.vocab[word].index] for word in words).astype(REAL)
mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL)
dists = dot(vectors, mean)
return sorted(zip(dists, words))[0][1]
def __getitem__(self, words):
"""
Accept a single word or a list of words as input.
If a single word: returns the word's representations in vector space, as
a 1D numpy array.
Multiple words: return the words' representations in vector space, as a
2d numpy array: #words x #vector_size. Matrix rows are in the same order
as in input.
Example::
>>> trained_model['office']
array([ -1.40128313e-02, ...])
>>> trained_model[['office', 'products']]
array([ -1.40128313e-02, ...]
[ -1.70425311e-03, ...]
...)
"""
if isinstance(words, string_types):
# allow calls like trained_model['office'], as a shorthand for trained_model[['office']]
return self.syn0[self.vocab[words].index]
return vstack([self.syn0[self.vocab[word].index] for word in words])
def __contains__(self, word):
return word in self.vocab
def similarity(self, w1, w2):
"""
Compute cosine similarity between two words.
Example::
>>> trained_model.similarity('woman', 'man')
0.73723527
>>> trained_model.similarity('woman', 'woman')
1.0
"""
return dot(matutils.unitvec(self[w1]), matutils.unitvec(self[w2]))
def n_similarity(self, ws1, ws2):
"""
Compute cosine similarity between two sets of words.
Example::
>>> trained_model.n_similarity(['sushi', 'shop'], ['japanese', 'restaurant'])
0.61540466561049689
>>> trained_model.n_similarity(['restaurant', 'japanese'], ['japanese', 'restaurant'])
1.0000000000000004
>>> trained_model.n_similarity(['sushi'], ['restaurant']) == trained_model.similarity('sushi', 'restaurant')
True
"""
v1 = [self[word] for word in ws1]
v2 = [self[word] for word in ws2]
return dot(matutils.unitvec(array(v1).mean(axis=0)), matutils.unitvec(array(v2).mean(axis=0)))
def init_sims(self, replace=False):
"""
Precompute L2-normalized vectors.
If `replace` is set, forget the original vectors and only keep the normalized
ones = saves lots of memory!
Note that you **cannot continue training** after doing a replace. The model becomes
effectively read-only = you can call `most_similar`, `similarity` etc., but not `train`.
"""
if getattr(self, 'syn0norm', None) is None or replace:
logger.info("precomputing L2-norms of word weight vectors")
if replace:
for i in xrange(self.syn0.shape[0]):
self.syn0[i, :] /= sqrt((self.syn0[i, :] ** 2).sum(-1))
self.syn0norm = self.syn0
if hasattr(self, 'syn1'):
del self.syn1
else:
self.syn0norm = (self.syn0 / sqrt((self.syn0 ** 2).sum(-1))[..., newaxis]).astype(REAL)
def estimate_memory(self, vocab_size=None, report=None):
"""Estimate required memory for a model using current settings and provided vocabulary size."""
vocab_size = vocab_size or len(self.vocab)
report = report or {}
report['vocab'] = vocab_size * (700 if self.hs else 500)
report['syn0'] = vocab_size * self.vector_size * dtype(REAL).itemsize
if self.hs:
report['syn1'] = vocab_size * self.layer1_size * dtype(REAL).itemsize
if self.negative:
report['syn1neg'] = vocab_size * self.layer1_size * dtype(REAL).itemsize
report['total'] = sum(report.values())
logger.info("estimated required memory for %i words and %i dimensions: %i bytes",
vocab_size, self.vector_size, report['total'])
return report
@staticmethod
def log_accuracy(section):
correct, incorrect = len(section['correct']), len(section['incorrect'])
if correct + incorrect > 0:
logger.info("%s: %.1f%% (%i/%i)" %
(section['section'], 100.0 * correct / (correct + incorrect),
correct, correct + incorrect))
def accuracy(self, questions, restrict_vocab=30000, most_similar=most_similar):
"""
Compute accuracy of the model. `questions` is a filename where lines are
4-tuples of words, split into sections by ": SECTION NAME" lines.
See https://code.google.com/p/word2vec/source/browse/trunk/questions-words.txt for an example.
The accuracy is reported (=printed to log and returned as a list) for each
section separately, plus there's one aggregate summary at the end.
Use `restrict_vocab` to ignore all questions containing a word whose frequency
is not in the top-N most frequent words (default top 30,000).
This method corresponds to the `compute-accuracy` script of the original C word2vec.
"""
ok_vocab = dict(sorted(iteritems(self.vocab),
key=lambda item: -item[1].count)[:restrict_vocab])
ok_index = set(v.index for v in itervalues(ok_vocab))
sections, section = [], None
for line_no, line in enumerate(utils.smart_open(questions)):
# TODO: use level3 BLAS (=evaluate multiple questions at once), for speed
line = utils.to_unicode(line)
if line.startswith(': '):
# a new section starts => store the old section
if section:
sections.append(section)
self.log_accuracy(section)
section = {'section': line.lstrip(': ').strip(), 'correct': [], 'incorrect': []}
else:
if not section:
raise ValueError("missing section header before line #%i in %s" % (line_no, questions))
try:
a, b, c, expected = [word.lower() for word in line.split()] # TODO assumes vocabulary preprocessing uses lowercase, too...
                except:
                    logger.info("skipping invalid line #%i in %s" % (line_no, questions))
                    continue
if a not in ok_vocab or b not in ok_vocab or c not in ok_vocab or expected not in ok_vocab:
logger.debug("skipping line #%i with OOV words: %s" % (line_no, line.strip()))
continue
ignore = set(self.vocab[v].index for v in [a, b, c]) # indexes of words to ignore
predicted = None
# find the most likely prediction, ignoring OOV words and input words
sims = most_similar(self, positive=[b, c], negative=[a], topn=False, restrict_vocab=restrict_vocab)
for index in matutils.argsort(sims, reverse=True):
if index in ok_index and index not in ignore:
predicted = self.index2word[index]
if predicted != expected:
logger.debug("%s: expected %s, predicted %s", line.strip(), expected, predicted)
break
if predicted == expected:
section['correct'].append((a, b, c, expected))
else:
section['incorrect'].append((a, b, c, expected))
if section:
# store the last section, too
sections.append(section)
self.log_accuracy(section)
total = {
'section': 'total',
'correct': sum((s['correct'] for s in sections), []),
'incorrect': sum((s['incorrect'] for s in sections), []),
}
self.log_accuracy(total)
sections.append(total)
return sections
def __str__(self):
return "%s(vocab=%s, size=%s, alpha=%s)" % (self.__class__.__name__, len(self.index2word), self.vector_size, self.alpha)
def save(self, *args, **kwargs):
# don't bother storing the cached normalized vectors, recalculable table
kwargs['ignore'] = kwargs.get('ignore', ['syn0norm', 'table', 'cum_table'])
super(Word2Vec, self).save(*args, **kwargs)
save.__doc__ = utils.SaveLoad.save.__doc__
@classmethod
def load(cls, *args, **kwargs):
model = super(Word2Vec, cls).load(*args, **kwargs)
# update older models
if hasattr(model, 'table'):
delattr(model, 'table') # discard in favor of cum_table
if model.negative and hasattr(model, 'index2word'):
model.make_cum_table() # rebuild cum_table from vocabulary
if not hasattr(model, 'corpus_count'):
model.corpus_count = None
for v in model.vocab.values():
if hasattr(v, 'sample_int'):
break # already 0.12.0+ style int probabilities
else:
v.sample_int = int(round(v.sample_probability * 2**32))
del v.sample_probability
if not hasattr(model, 'syn0_lockf') and hasattr(model, 'syn0'):
model.syn0_lockf = ones(len(model.syn0), dtype=REAL)
if not hasattr(model, 'random'):
model.random = random.RandomState(model.seed)
if not hasattr(model, 'train_count'):
model.train_count = 0
model.total_train_time = 0
return model
class FakeJobQueue(object):
"""Pretends to be a Queue; does equivalent of work_loop in calling thread."""
def __init__(self, init_fn, job_fn):
self.inits = init_fn()
self.job_fn = job_fn
def put(self, job):
self.job_fn(job, self.inits)
class BrownCorpus(object):
"""Iterate over sentences from the Brown corpus (part of NLTK data)."""
def __init__(self, dirname):
self.dirname = dirname
def __iter__(self):
for fname in os.listdir(self.dirname):
fname = os.path.join(self.dirname, fname)
if not os.path.isfile(fname):
continue
for line in utils.smart_open(fname):
line = utils.to_unicode(line)
# each file line is a single sentence in the Brown corpus
# each token is WORD/POS_TAG
token_tags = [t.split('/') for t in line.split() if len(t.split('/')) == 2]
# ignore words with non-alphabetic tags like ",", "!" etc (punctuation, weird stuff)
words = ["%s/%s" % (token.lower(), tag[:2]) for token, tag in token_tags if tag[:2].isalpha()]
if not words: # don't bother sending out empty sentences
continue
yield words
class Text8Corpus(object):
"""Iterate over sentences from the "text8" corpus, unzipped from http://mattmahoney.net/dc/text8.zip ."""
def __init__(self, fname, max_sentence_length=1000):
self.fname = fname
self.max_sentence_length = max_sentence_length
def __iter__(self):
# the entire corpus is one gigantic line -- there are no sentence marks at all
# so just split the sequence of tokens arbitrarily: 1 sentence = 1000 tokens
sentence, rest = [], b''
with utils.smart_open(self.fname) as fin:
while True:
text = rest + fin.read(8192) # avoid loading the entire file (=1 line) into RAM
if text == rest: # EOF
sentence.extend(rest.split()) # return the last chunk of words, too (may be shorter/longer)
if sentence:
yield sentence
break
last_token = text.rfind(b' ') # last token may have been split in two... keep for next iteration
words, rest = (utils.to_unicode(text[:last_token]).split(),
text[last_token:].strip()) if last_token >= 0 else ([], text)
sentence.extend(words)
while len(sentence) >= self.max_sentence_length:
yield sentence[:self.max_sentence_length]
sentence = sentence[self.max_sentence_length:]
class LineSentence(object):
"""
Simple format: one sentence = one line; words already preprocessed and separated by whitespace.
"""
def __init__(self, source, max_sentence_length=10000, limit=None):
"""
`source` can be either a string or a file object. Clip the file to the first
        `limit` lines (or not clipped at all if `limit` is None, the default).
Example::
sentences = LineSentence('myfile.txt')
Or for compressed files::
sentences = LineSentence('compressed_text.txt.bz2')
sentences = LineSentence('compressed_text.txt.gz')
"""
self.source = source
self.max_sentence_length = max_sentence_length
self.limit = limit
def __iter__(self):
"""Iterate through the lines in the source."""
try:
# Assume it is a file-like object and try treating it as such
# Things that don't have seek will trigger an exception
self.source.seek(0)
for line in itertools.islice(self.source, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i : i + self.max_sentence_length]
i += self.max_sentence_length
except AttributeError:
# If it didn't work like a file, use it as a string filename
with utils.smart_open(self.source) as fin:
for line in itertools.islice(fin, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i : i + self.max_sentence_length]
i += self.max_sentence_length
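# A minimal, hypothetical usage sketch (never called by this module): it ties
# together LineSentence, train() and most_similar() as described in the
# docstrings above. The corpus path and hyperparameter values are assumptions
# chosen purely for illustration.
def _example_usage(corpus_path='corpus.txt'):
    sentences = LineSentence(corpus_path)  # one whitespace-tokenized sentence per line
    model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)
    # continue training on the same corpus; total_examples enables alpha decay
    model.train(sentences, total_examples=model.corpus_count)
    return model.most_similar(positive=['woman', 'king'], negative=['man'], topn=5)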
# Example: ./word2vec.py ~/workspace/word2vec/text8 ~/workspace/word2vec/questions-words.txt ./text8
if __name__ == "__main__":
logging.basicConfig(
format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s',
level=logging.INFO)
logging.info("running %s", " ".join(sys.argv))
logging.info("using optimization %s", FAST_VERSION)
# check and process cmdline input
program = os.path.basename(sys.argv[0])
if len(sys.argv) < 2:
print(globals()['__doc__'] % locals())
sys.exit(1)
infile = sys.argv[1]
from gensim.models.word2vec import Word2Vec # avoid referencing __main__ in pickle
seterr(all='raise') # don't ignore numpy errors
# model = Word2Vec(LineSentence(infile), size=200, min_count=5, workers=4)
model = Word2Vec(Text8Corpus(infile), size=200, min_count=5, workers=1)
if len(sys.argv) > 3:
outfile = sys.argv[3]
model.save(outfile + '.model')
model.save_word2vec_format(outfile + '.model.bin', binary=True)
model.save_word2vec_format(outfile + '.model.txt', binary=False)
if len(sys.argv) > 2:
questions_file = sys.argv[2]
model.accuracy(sys.argv[2])
logging.info("finished running %s", program)
|
krishna11888/ai
|
third_party/gensim/gensim/models/word2vec.py
|
Python
|
gpl-2.0
| 74,478
|
[
"VisIt"
] |
e0ecd854503a05067cbf70041c583ea7b81093bced356468d7c66c6f94be3229
|
# -*- coding: utf-8 -*-
# ***********************************************************************
# Copyright (C) 2016 - 2017 Oscar Gerardo Lazo Arjona *
# <oscar.lazoarjona@physics.ox.ac.uk> *
# ***********************************************************************
r"""This example calculates the inhomogeneous dephasing."""
import numpy as np
from matplotlib import pyplot as plt
from scipy.optimize import curve_fit
from multiprocessing import Pool, cpu_count
from time import time
from quantum_memories import orca
from quantum_memories.misc import set_parameters_ladder, efficiencies
def model_theoretical(t, amp, sigma):
"""Return points on a gaussian with the given amplitude and dephasing."""
return amp*np.exp(-(t/sigma)**2)
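# Illustrative only (this helper is an assumption added for clarity and is not
# called by the script): it shows the same curve_fit pattern used on the real
# efficiencies further down, applied to synthetic points from the model itself.
def _fit_demo():
    t_demo = np.linspace(0.0, 10e-9, 8)
    eff_demo = model_theoretical(t_demo, 0.25, 5.4e-9)
    amp_fit, sig_fit = curve_fit(model_theoretical, t_demo, eff_demo, p0=[1.0, 5e-9])[0]
    return amp_fit, sig_fit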
def efficiencies_delay(i, Nv, explicit_decoherence=None):
r"""Get the efficiencies with t0r = t0w + (3.5+i) ns."""
name = "ORCA"+str(i)
# We set custom parameters (different from those in settings.py)
t0w = default_params["t0w"]
params = set_parameters_ladder({"Nv": Nv, "Nt": 51000, "T": 16e-9,
"t0r": t0w+3.5e-9+i*1e-9,
"verbose": 0})
t0w = params["t0w"]
# t0r = params["t0r"]
# print "......................................."
# print str(i)+"th readout, t0r - t0w =", (t0r - t0w)*1e9, "ns"
# We call the solver from orca_solver.py
t, Z, vZ, rho1, Om1 = orca.solve(params, plots=False, name=name)
# We calculate the efficiencies.
eff_in, eff_out, eff = efficiencies(t, Om1, params,
plots=True, name=name)
if explicit_decoherence is not None:
eff_out = eff_out*explicit_decoherence
eff = eff_in*eff_out
# print "eff_in, eff_out, eff =", eff_in, eff_out, eff
return eff_in, eff_out, eff
###############################################################################
# The number of processors available for parallel computing.
Nprocs = cpu_count()
# We set the default parameters taking them from settings.py.
default_params = set_parameters_ladder()
if __name__ == '__main__':
###########################################################################
# We test various readout times.
print "Testing readout times..."
t0 = time()
Nv = 9
Ndelay = 8
tdelay = np.array([3.5e-9 + i*1e-9 for i in range(Ndelay)])
t0r_list = []
eff_in_list = []; eff_out_list = []; eff_list = []
eff_in_list = np.zeros(Ndelay)
eff_out_list = np.zeros(Ndelay)
eff_list = np.zeros(Ndelay)
# We create the parallel processes.
pool = Pool(processes=Nprocs)
# We calculate the efficiencies in parallel.
procs = [pool.apply_async(efficiencies_delay, [i, Nv])
for i in range(Ndelay)]
# We get the results.
aux = [procs[i].get() for i in range(Ndelay)]
pool.close()
pool.join()
# We save the results with more convenient names.
for i in range(Ndelay):
eff_in_list[i], eff_out_list[i], eff_list[i] = aux[i]
####################################
# We plot the total efficiencies.
plt.title(r"$\mathrm{Dephasing}$", fontsize=20)
plt.plot(tdelay*1e9, eff_list, "k+",
label=r"$\eta_{\mathrm{model}}$", ms=10)
# We fit a gaussian.
amp_the, sig_the = curve_fit(model_theoretical, tdelay, eff_list,
p0=[1.0, 5.4e-9])[0]
print "Nv, 1/e time, calculation time:", Nv, sig_the*1e9, "ns",
print (time() - t0)/60.0, "min"
# We make a plot of the fitted gaussian.
tdelay_cont = np.linspace(0, tdelay[-1]*1.05, 500)
eff_exp = model_theoretical(tdelay_cont, amp_the, 5.4e-9)
eff_the = model_theoretical(tdelay_cont, amp_the, sig_the)
plt.plot(tdelay_cont*1e9, eff_exp, "k-",
label=r"$\eta_{\mathrm{experiment}}$")
plt.plot(tdelay_cont*1e9, eff_the, "k:",
label=r"$\eta_{\mathrm{fit}}$")
plt.xlabel(r"$t_{0\mathrm{r}}-t_{0\mathrm{w}} \ \mathrm{(ns)}$",
fontsize=20)
plt.ylabel(r"$\eta$", fontsize=20)
plt.ylim([0, None])
plt.legend(fontsize=15)
plt.savefig("doppler_dephasing.png", bbox_inches="tight")
plt.savefig("doppler_dephasing.pdf", bbox_inches="tight")
plt.close("all")
###########################################################################
# We now repeat exactly the same thing using different number of velocity
# classes.
Nvmax = 13
Nv = [2*i + 1 for i in range((Nvmax-1)/2+1)]
# print Nv
sigma = np.zeros(len(Nv))
print
print "Calculating the best number of velocity classes to use..."
for jj in range(len(Nv)):
t0 = time()
Nvi = Nv[jj]
Ndelay = 8
tdelay = np.array([3.5e-9 + i*1e-9 for i in range(Ndelay)])
t0r_list = []
eff_in_list = []; eff_out_list = []; eff_list = []
eff_in_list = np.zeros(Ndelay)
eff_out_list = np.zeros(Ndelay)
eff_list = np.zeros(Ndelay)
# We create the parallel processes.
pool = Pool(processes=Nprocs)
# We calculate the efficiencies in parallel.
procs = [pool.apply_async(efficiencies_delay, [i, Nvi])
for i in range(Ndelay)]
# We get the results.
aux = [procs[i].get() for i in range(Ndelay)]
pool.close()
pool.join()
# We save the results with more convenient names.
for i in range(Ndelay):
eff_in_list[i], eff_out_list[i], eff_list[i] = aux[i]
####################################
# We plot the total efficiencies.
plt.title(r"$\mathrm{Dephasing}$", fontsize=20)
plt.plot(tdelay*1e9, eff_list, "k+", label=r"$\eta_{\mathrm{model}}$",
ms=10)
# We fit a gaussian.
amp_the, sig_the = curve_fit(model_theoretical, tdelay, eff_list,
p0=[1.0, 5.4e-9])[0]
sigma[jj] = abs(sig_the)
print "Nv, 1/e time, calculation time:", Nvi, abs(sig_the)*1e9, "ns",
print (time() - t0)/60.0, "min"
# We make a plot of the fitted gaussian.
tdelay_cont = np.linspace(0, tdelay[-1]*1.05, 500)
eff_exp = model_theoretical(tdelay_cont, amp_the, 5.4e-9)
eff_the = model_theoretical(tdelay_cont, amp_the, sig_the)
plt.plot(tdelay_cont*1e9, eff_exp, "k-",
label=r"$\eta_{\mathrm{experiment}}$")
plt.plot(tdelay_cont*1e9, eff_the, "k:",
label=r"$\eta_{\mathrm{fit}}$")
plt.xlabel(r"$t_{0\mathrm{r}}-t_{0\mathrm{w}} \ \mathrm{(ns)}$",
fontsize=20)
plt.ylabel(r"$\eta$", fontsize=20)
plt.ylim([0, None])
plt.legend(fontsize=15)
plt.savefig("doppler_dephasing"+str(Nvi)+".png", bbox_inches="tight")
plt.savefig("doppler_dephasing"+str(Nvi)+".pdf", bbox_inches="tight")
plt.close("all")
# We plot the dephasing as a function of the number of velocity groups
# being considered.
# Nv = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21]
# sigma = [3.08691878e-08, 3.05867013e-08, 1.28434666e-08, 1.14965758e-08,
# 1.14866015e-08, 1.14866867e-08, 1.14867979e-08, 1.14869120e-08,
# 1.14870206e-08, 1.14871203e-08, 1.14872106e-08]
sigma = np.array(sigma)
i = 0
# print Nv
# print sigma
fig, ax = plt.subplots()
ax.plot(Nv[i:], sigma[i:]*1e9, "r+")
ax.set_xticks(Nv[i:])
ax.set_xlabel(r"$N_v$", fontsize=20)
ax.set_ylabel(r"$1/e \ \mathrm{time \ (ns)}$", fontsize=20)
plt.savefig("doppler_dephasing_velocities.png", bbox_inches="tight")
plt.savefig("doppler_dephasing_velocities.pdf", bbox_inches="tight")
plt.close("all")
print
print "So 9 velocity classes seem like a good compromise."
|
oscarlazoarjona/quantum_memories
|
examples/orca/doppler_dephasing/doppler_dephasing.py
|
Python
|
gpl-3.0
| 7,853
|
[
"Gaussian",
"ORCA"
] |
34f7713d73e2da5782de0b779af881245b5ff6467e197a18530f48a88a54c6e1
|
""" Algorithm for clustering : Adaptive Affinity propagation
"""
# Author: Ilya Patrushev ilya.patrushev@gmail.com
# License: BSD clause 3
import numpy as np
import warnings
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.utils import as_float_array
from sklearn.utils import check_random_state
from sklearn.metrics import euclidean_distances
def average_silhouette(I, S):
"""
Computes average Silhouette index
Parameters
----------
I: array
        Cluster exemplar indices
S: array [n_samples, n_samples]
Matrix of similarities between points
Returns
-------
float
average silhouette index
"""
K = I.size
n_samples = S.shape[0]
D = -S
if K < 2:
return 0
a = np.zeros(n_samples)
b = np.zeros(n_samples)
b[:] = np.inf
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K)
for k in range(K):
ii = np.where(c == k)[0]
j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
I[k] = ii[j]
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K)
valid = np.zeros(n_samples, dtype = bool)
s = np.zeros(n_samples)
s[:] = 0
for k in range(K):
ii = np.where(c == k)[0]
a[ii] = 0
if ii.size > 1:
a[ii] = (np.sum(D[ii[:, np.newaxis], ii], axis=0)
- np.diag(D[ii[:, np.newaxis], ii]))/(ii.size - 1)
valid[ii] = True
io = np.where(c != k)[0]
b[io] = np.minimum(b[io], np.mean(D[ii[:, np.newaxis], io], axis=0))
s[valid] = ((b - a)/np.maximum(a, b))[valid]
return np.average(s)
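# A small illustrative helper (an addition for clarity, not part of the original
# module): it builds the negative-squared-euclidean similarity matrix for toy
# 1-D data and evaluates average_silhouette() for a hand-picked pair of exemplars.
def _average_silhouette_demo():
    X = np.array([[0.0], [0.1], [0.2], [5.0], [5.1], [5.2]])
    S = -euclidean_distances(X, squared=True)
    I = np.array([1, 4])  # one exemplar per apparent cluster
    return average_silhouette(I, S)  # close to 1 for such well-separated clusters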
def adaptive_affinity_propagation(S, convergence_iter=40, delay=10
, max_iter=50000, max_damping=0.85, add_noise=1.e-4, max_noise=5.e-4
, Kmin=2, Kmax=None, copy=True, verbose=False, random_state=None):
"""Perform Adaptive Affinity Propagation Clustering of data
Parameters
----------
S: array [n_samples, n_samples]
Matrix of similarities between points
convergence_iter: int, optional, default: 40
        Number of iterations with no change in the exemplars
        of the estimated clusters to assume convergence.
delay: int, optional, default: 10
Number of iterations the convergent state should hold before
changing the preference value
    max_iter: int, optional, default: 50000
Maximum number of iterations
max_damping: float, optional, default: 0.85
        The ceiling of the damping factor in adaptive damping.
add_noise: float, optional, default: 1.e-4
The amount of Gaussian noise in units of std.dev. of similarity
values to add to S per iteration.
max_noise: float, optional, default: 5.e-4
The maximum total amount of Gaussian noise in units of std.dev.
of similarity values to add to S.
Kmin: int, optional, default: 2
The minimum number of clusters to look for.
Kmax: int, optional, default: None
The maximum number of clusters to look for.
copy: boolean, optional, default: True
If copy is False, the affinity matrix is modified inplace by the
algorithm, for memory efficiency
verbose: boolean, optional, default: False
The verbosity level
Returns
-------
cluster_centers_indices: array [n_clusters]
index of clusters centers
labels : array [n_samples]
cluster labels for each point
Notes
-----
TODO: add examples in examples/cluster/plot_affinity_propagation.py.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
K. Wang, J. Zhang, D. Li, X. Zhang and T. Guo. Adaptive Affinity
Propagation Clustering. Acta Automatica Sinica, 33(12):1242-1246, 2007
"""
S = as_float_array(S, copy=copy)
n_samples = S.shape[0]
damping = .5
if S.shape[0] != S.shape[1]:
raise ValueError("S must be a square array (shape=%s)" % repr(S.shape))
A = np.zeros((n_samples, n_samples))
R = np.zeros((n_samples, n_samples)) # Initialize messages
if Kmax is None:
Kmax = np.inf
# Remove degeneracies
random_state = check_random_state(random_state)
S += ((np.finfo(np.double).eps * S + np.finfo(np.double).tiny * 100) *
random_state.randn(n_samples, n_samples))
# Execute parallel affinity propagation updates
e = np.zeros((n_samples, convergence_iter))
ind = np.arange(n_samples)
# adaptive init
w2 = convergence_iter/8
Ks = np.zeros(convergence_iter)
Kb = np.zeros(convergence_iter)
pm = np.median(S)
ps = 0.01*pm
p =.5*pm
b = 0
nits = 0
best_I = np.array([0])
best_sil = -1
lastC = -1
lastK = n_samples
let_it_pass = 0
nstd = np.std(S[np.triu_indices(S.shape[0], 1)])
noise_count = 0
for it in range(max_iter):
# Place preference on the diagonal of S
S.flat[::(n_samples + 1)] = p
# Compute responsibilities
Rold = R.copy()
AS = A + S
I = np.argmax(AS, axis=1)
Y = AS[np.arange(n_samples), I]
AS[ind, I[ind]] = - np.finfo(np.double).max
Y2 = np.max(AS, axis=1)
R = S - Y[:, np.newaxis]
R[ind, I[ind]] = S[ind, I[ind]] - Y2[ind]
R = (1 - damping) * R + damping * Rold # Damping
# Compute availabilities
Aold = A
Rp = np.maximum(R, 0)
Rp.flat[::n_samples + 1] = R.flat[::n_samples + 1]
A = np.sum(Rp, axis=0)[np.newaxis, :] - Rp
dA = np.diag(A)
A = np.minimum(A, 0)
A.flat[::n_samples + 1] = dA
A = (1 - damping) * A + damping * Aold # Damping
# Check for convergence
ri = it % convergence_iter
E = (np.diag(A) + np.diag(R)) > 0
e[:, ri] = E
K = np.sum(E, axis=0)
# Adaptive
Ks[ri] = K
if it >= w2 + 1:
Ksw2 = np.hstack([Ks[min(convergence_iter + ri - w2
, convergence_iter):], Ks[max(ri - w2, 0):ri]])
ri_1 = (it - 1) % convergence_iter
Km_1 = np.mean(np.hstack([Ks[max(ri_1 - w2, 0):ri_1]
, Ks[min(convergence_iter + ri_1 - w2, convergence_iter):]]))
decrease = (np.mean(Ksw2) - Km_1) < 0
constant = np.sum(np.abs(Ksw2[:-1] - Ksw2[-1])) == 0
Kb[ri] = int(decrease or constant)
if it >= convergence_iter:
se = np.sum(e, axis=1)
unconverged = (np.sum((se == convergence_iter) + (se == 0))
!= n_samples)
# Scanning p
if not unconverged and (K > 0):
Hdown = 1
else:
Hdown = 0
b = 0
nits = 0
nits += 1
if Hdown == 1 and nits >= delay:
lastK = K
I = np.where(np.diag(A + R) > 0)[0]
sil = average_silhouette(I, S)
if sil > best_sil and K <= Kmax:
best_sil = sil
best_I = I
lastC = it
if verbose:
print("Converged at %d clusters after %d iterations, sil = %f." % (K, it, sil))
b += 1
q = .1*np.sqrt(K + 50)
p += b*ps/q
nits = 0
if K <= 2:
if verbose:
print("K <= 2 after %d iterations." % it)
break
if let_it_pass < 0:
let_it_pass += 1
vib = (lastC >= 0 and (it - lastC) > 5*(convergence_iter + delay))
osc = np.sum(Kb) < 2*convergence_iter/3 and let_it_pass == 0
if osc or vib:
if verbose:
if osc:
print("Oscillations at %f clusters after %d iterations, p = %f." % (damping, it, p))
else:
print("Vibrations at %f clusters after %d iterations, p = %f." % (damping, it, p))
lastC = it
damping = min(max_damping, damping + 0.05)
if osc :
let_it_pass = -2*convergence_iter/3
if damping >= max_damping:
if vib and nstd > 0 and np.sqrt(noise_count)*add_noise < max_noise :
S += add_noise*nstd*random_state.randn(n_samples, n_samples)
noise_count += 1
else:
p += ps
else:
if verbose:
print("max_iter reached.")
I = best_I
K = I.size # Identify exemplars
if K > 0:
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K) # Identify clusters
# Refine the final set of exemplars and clusters and return results
for k in range(K):
ii = np.where(c == k)[0]
j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
I[k] = ii[j]
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K)
labels = I[c]
# Reduce labels to a sorted, gapless, list
cluster_centers_indices = np.unique(labels)
labels = np.searchsorted(cluster_centers_indices, labels)
else:
labels = np.empty((n_samples, 1))
cluster_centers_indices = None
labels.fill(np.nan)
return cluster_centers_indices, labels
###############################################################################
class AdaptiveAffinityPropagation(BaseEstimator, ClusterMixin):
"""Perform Affinity Propagation Clustering of data
Parameters
----------
convergence_iter: int, optional, default: 40
        Number of iterations with no change in the exemplars
        of the estimated clusters to assume convergence.
delay: int, optional, default: 10
Number of iterations the convergent state should hold before
changing the preference value
max_damping: float, optional, default: 0.85
        The ceiling of the damping factor in adaptive damping.
add_noise: float, optional, default: 1.e-4
The amount of Gaussian noise in units of std.dev. of similarity
values to add to S per iteration.
max_noise: float, optional, default: 5.e-4
The maximum total amount of Gaussian noise in units of std.dev.
of similarity values to add to S.
    max_iter: int, optional, default: 50000
Maximum number of iterations
min_clusters: int, optional, default: 2
The minimum number of clusters to look for.
max_clusters: int, optional, default: None
The maximum number of clusters to look for.
copy: boolean, optional, default: True
If copy is False, the affinity matrix is modified inplace by the
algorithm, for memory efficiency
affinity: string, optional, default=``euclidean``
Which affinity to use. At the moment ``precomputed`` and
``euclidean`` are supported. ``euclidean`` uses the
negative squared euclidean distance between points.
verbose: boolean, optional, default: False
Whether to be verbose.
Attributes
----------
`cluster_centers_indices_` : array, [n_clusters]
Indices of cluster centers
`labels_` : array, [n_samples]
Labels of each point
`affinity_matrix_` : array-like, [n_samples, n_samples]
Stores the affinity matrix used in ``fit``.
Notes
-----
TODO: add examples in examples/cluster/plot_affinity_propagation.py.
The algorithmic complexity of affinity propagation is quadratic
in the number of points.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
K. Wang, J. Zhang, D. Li, X. Zhang and T. Guo. Adaptive Affinity
Propagation Clustering. Acta Automatica Sinica, 33(12):1242-1246, 2007
"""
def __init__(self, convergence_iter=40, delay=10, max_damping=.95
, add_noise=1.e-4, max_noise=5.e-4, max_iter=50000
, min_clusters=2, max_clusters=None, copy=True
, affinity='euclidean', verbose=False, random_state=None):
self.convergence_iter = convergence_iter
self.delay = delay
self.max_damping = max_damping
self.add_noise = add_noise
self.max_noise = max_noise
self.max_iter = max_iter
self.min_clusters = min_clusters
self.max_clusters = max_clusters
self.copy = copy
self.verbose = verbose
self.affinity = affinity
self.random_state = random_state
@property
def _pairwise(self):
        return self.affinity == "precomputed"
def fit(self, X):
""" Create affinity matrix from negative euclidean distances, then
apply adaptive affinity propagation clustering.
Parameters
----------
X: array [n_samples, n_features] or [n_samples, n_samples]
Data matrix or, if affinity is ``precomputed``, matrix of
similarities / affinities.
"""
if X.shape[0] == X.shape[1] and not self._pairwise:
warnings.warn("The API of AffinityPropagation has changed."
"Now ``fit`` constructs an affinity matrix from the"
" data. To use a custom affinity matrix, set "
"``affinity=precomputed``.")
        if self.affinity == "precomputed":
self.affinity_matrix_ = X
        elif self.affinity == "euclidean":
self.affinity_matrix_ = -euclidean_distances(X, squared=True)
else:
raise ValueError("Affinity must be 'precomputed' or "
"'euclidean'. Got %s instead"
% str(self.affinity))
self.cluster_centers_indices_, self.labels_ = adaptive_affinity_propagation(
self.affinity_matrix_
, convergence_iter=self.convergence_iter
, delay=self.delay
, max_damping=self.max_damping
, add_noise=self.add_noise
, max_noise=self.max_noise
, max_iter=self.max_iter
, Kmin=self.min_clusters
, Kmax=self.max_clusters
, copy=self.copy
, verbose=self.verbose
, random_state=self.random_state)
return self
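###############################################################################
# A minimal usage sketch (illustrative; the toy blobs and all parameter values
# below are assumptions, not part of the estimator's API). With the default
# affinity='euclidean' the estimator builds the similarity matrix itself; pass
# affinity='precomputed' to supply a ready-made matrix of similarities instead.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = np.vstack([rng.randn(20, 2), rng.randn(20, 2) + 5.0])
    aap = AdaptiveAffinityPropagation(random_state=0).fit(X_demo)
    print("exemplar indices: %s" % (aap.cluster_centers_indices_,))
    print("first labels: %s" % (aap.labels_[:5],))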
|
ilyapatrushev/isimage
|
isimage/adaptive_affinity_propagation_.py
|
Python
|
gpl-2.0
| 14,868
|
[
"Gaussian"
] |
d9cda5f8a7433ef80fd49b75c5d49ae942de468cb962d156c78c2060f3d75b2b
|
##
## Session 5: Generative Models
##
#
#
# Learning Goals
#
#
# - [Introduction]
# - [Generative Adversarial Networks]
# - [Input Pipelines]
# - [GAN/DCGAN]
# - [Extensions]
# - [Recurrent Networks]
# - [Basic RNN Cell]
# - [LSTM RNN Cell]
# - [GRU RNN Cell]
#
# - [Character Language Model]
# - [Setting up the Data]
# - [Creating the Model]
# - [Loss]
# - [Clipping the Gradient]
# - [Training]
# - [Extensions]
#
# - [DRAW Network] (???)
#
# - [Future]
#
# - [Homework]
#
# - [Examples]
#
# - [Reading]
#
##
## Introduction
##
# So far we've seen the basics of neural networks, how they can be
# used for encoding large datasets, or for predicting labels. We've
# also seen how to interrogate the deeper representations that
# networks learn in order to help with their objective, and how
# amplifying some of these objectives led to creating deep dream.
# Finally, we saw how the representations in deep nets trained on
# object recognition are capable of representing both style and
# content, and how we could independently manipulate a new image to
# have the style of one image, and the content of another.
#
# In this session we'll start to explore some more generative models.
# We've already seen how an autoencoder is composed of both an
# encoder, which takes an input and encodes it into some hidden
# state vector. From this hidden state vector, a decoder is capable
# of resynthesizing the original input, though with some loss. So
# think back to the decoders that we've already built. Each has an
# internal state, and from that state, it can express the entire
# distribution of the original data, that is, it can express any
# possible image that it has seen.
#
# We call that a generative model as it is capable of generating the
# distribution of the data. Contrast this to the latter half of
# Session 3 when we saw how to label an image using supervised
# learning. This model is really trying to discriminate the data
# distribution based on the extra labels that we have. So this is
# another helpful distinction with machine learning algorithms, ones
# that are generative and others that are discriminative.
#
# In this session, we'll explore more generative models and see how
# internal states can be used to generate data in two other very
# powerful generative networks: one based on game theory, called the
# generative adversarial network, and another capable of remembering
# and forgetting over time, allowing us to model dynamic content and
# sequences, called the recurrent neural network.
#
##
## Generative Adversarial Networks (GAN)
##
# In session 3, we were briefly introduced to the Variational
# Autoencoder. This network was very powerful because it encompasses
# a very strong idea. And that idea is measuring distance not
# necessarily based on pixels, but in some "semantic space". And I
# mentioned then that we'd see another type of network capable of
# generating even better images of CelebNet.
#
# So this is where we're heading...
#
# We're now going to see how to do that using what's called the
# generative adversarial network.
#
# The generative adversarial network is actually two networks. One
# called the generator, and another called the discriminator. The
# basic idea is the generator is trying to create things which look
# like the training data. So for images, more images that look like
# the training data. The discriminator has to guess whether what it's
# given is a real training example or the output of the
# generator. By training one after the other, you ensure neither is
# ever too strong, but both grow stronger together. The discriminator
# is also learning a distance function! This is pretty cool because
# we no longer need to measure pixel-based distance, but we learn the
# distance function entirely!
#
# The Generative Adversarial Network, or GAN for short, is in a
# way very similar to the autoencoder we created in session 3. Or at
# least the implementation of it is. The discriminator is a lot like
# the encoder part of this network, except instead of going down to
# the 64 dimensions we used in our autoencoder, we'll reduce our
# input down to a single value, yes or no, 0 or 1, denoting yes its a
# true training example, or no, it's a generated one.
#
# And the generator network is exactly like the decoder of the
# autoencoder. Except, there is nothing feeding into this inner
# layer. It is just on its own. From whatever vector of hidden values
# it starts off with, it will generate a new example meant to look
# just like the training data. One pitfall of this model is there is
# no explicit encoding of an input. Meaning, you can't take an input
# and find what would possibly generate it. However, there are recent
# extensions to this model which make it more like the autoencoder
# framework, allowing it to do this.
#
#
# Input Pipelines
#
# Before we get started, we're going to need to work with a very
# large image dataset, the CelebNet dataset. In session 1, we loaded
# this dataset but only grabbed the first 1000 images. That's because
# loading all 200 thousand images would take up a lot of memory which
# we'd rather not have to do. And in Session 3 we were introduced
# again to the CelebNet and Sita Sings the Blues which required us to
# load a lot of images. I glossed over the details of the input
# pipeline then so we could focus on learning the basics of neural
# networks. But I think now we're ready to see how to handle some
# larger datasets.
#
# Tensorflow provides operations for taking a list of files, using
# that list to load the data it points to, decoding each file's data
# as an image, and creating shuffled minibatches. All of this is put
# into a queue and managed by queuerunners and coordinators.
#
# As you may have already seen in the Variational Autoencoder's code,
# I've provided a simple interface for creating such an input
# pipeline using image files which will also apply cropping and
# reshaping of images in the pipeline so you don't have to deal with
# any of it. Let's see how we can use it to load the CelebNet
# dataset.
#
#
# Let's first get the list of all the CelebNet files:
print("Loading tensorflow...")
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# dja
#import os
plt.style.use('bmh')
import datetime
#np.set_printoptions(threshold=np.inf) # display FULL array (infinite)
plt.ion()
plt.figure(figsize=(4, 4))
TID=datetime.date.today().strftime("%Y%m%d")+"_"+datetime.datetime.now().time().strftime("%H%M%S")
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from matplotlib.cbook import MatplotlibDeprecationWarning
warnings.filterwarnings("ignore", category=MatplotlibDeprecationWarning)
def wait(n):
#plt.pause(n)
plt.pause(1)
#input("(press enter)")
print("Loading celebrities...")
from libs.datasets import CELEB
files = CELEB()
# And then create our input pipeline to create shuffled minibatches
# and crop the images to a standard shape. This will require us to
# specify the list of files, how large each minibatch is, how many
# epochs we want to run for, and how we want the images to be
# cropped.
from libs.dataset_utils import create_input_pipeline
batch_size = 100
n_epochs = 10
input_shape = [218, 178, 3]
crop_shape = [64, 64, 3]
crop_factor = 0.8
batch = create_input_pipeline(
files=files,
batch_size=batch_size,
n_epochs=n_epochs,
crop_shape=crop_shape,
crop_factor=crop_factor,
shape=input_shape)
# Then when we are ready to use the batch generator, we'll need to
# create a `Coordinator` and specify this to tensorflow using the
# `start_queue_runners` method in order to provide the data:
sess = tf.Session()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# We can grab our data using our `batch` generator like so:
batch_xs = sess.run(batch)
# We get batch_size at a time, so 100
print("batch_xs.shape: ", batch_xs.shape)
# The datatype is float32 since that is what we use in the tensorflow
# graph.
# And the max value still has the original image range from 0-255
print("batch_xs.dtype: ", batch_xs.dtype, " max: ", np.max(batch_xs.dtype))
# So to plot it, we'll need to divide by 255.
plt.imshow(batch_xs[0] / 255.0)
wait(1)
# Let's see how to make use of this while we train a generative
# adversarial network!
#
#
# GAN/DCGAN
#
# Inside the libs directory, you'll find `gan.py` which shows how to
# create a generative adversarial network with or without
# convolution, and how to train it using the CelebNet dataset. Let's
# step through the code and then I'll show you what it's capable of
# doing.
#
# -- Code demonstration not transcribed. --
#
#
# Extensions
#
# So it turns out there are a ton of very fun and interesting
# extensions when you have a model in this space. It turns out that
# you can perform addition in the latent space. I'll just show you
# Alec Radford's code base on github to show you what that looks
# like.
#
##
## Recurrent Networks (RNN)
##
# Up until now, all of the networks that we've learned and worked
# with really have no sense of time. They are static. They cannot
# remember sequences, nor can they understand order outside of the
# spatial dimensions we offer them. Imagine for instance that we wanted
# a network capable of reading. As input, it is given one letter at a
# time. So let's say it were given the letters 'n', 'e', 't', 'w',
# 'o', 'r', and we wanted it to learn to output 'k'. It would need to
# be able to reason about inputs it received before the last one it
# received, the letters before 'r'. But it's not just letters.
#
# Consider the way we look at the world. We don't simply download a
# high resolution image of the world in front of us. We move our
# eyes. Each fixation takes in new information and each of these
# together in sequence help us perceive and act. That again is a
# sequential process.
#
# Recurrent neural networks let us reason about information over
# multiple timesteps. A recurrent network is able to encode what it
# has seen in the past as if it has a memory of its own. It does this
# by basically creating one HUGE network that expands over time. It
# can reason about the current timestep by conditioning on what it
# has already seen. By giving it many sequences as batches, it can
# learn a
# distribution over sequences which can model the current timestep
# given the previous timesteps. But in order for this to be
# practical, we specify that the weights are shared across every
# timestep: each time the network views a new input, it reuses the
# same weights. We also include a new matrix, `H`, which carries
# information from the previous timestep to the current one,
# connecting each new timestep to the last. For this reason, we can
# just think of recurrent networks as ones with loops in them.
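#
# As a tiny illustration of that idea, here is a minimal numpy sketch
# (purely for intuition, not used anywhere else in this script) of a
# vanilla recurrent step: the same input weights and hidden-to-hidden
# matrix `H` are reused at every timestep, and the hidden state
# carries information forward.
n_in, n_hid = 8, 16
W_xh = np.random.randn(n_in, n_hid) * 0.1   # input-to-hidden weights (shared over time)
W_hh = np.random.randn(n_hid, n_hid) * 0.1  # hidden-to-hidden weights, the `H` above
b_h = np.zeros(n_hid)
h_t = np.zeros(n_hid)                       # initial hidden state
for x_t in np.random.randn(5, n_in):        # a toy sequence of 5 timesteps
    h_t = np.tanh(x_t.dot(W_xh) + h_t.dot(W_hh) + b_h)
print("toy RNN final hidden state shape: ", h_t.shape)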
#
# Other than that, they are exactly like every other network we've
# come across! They will have an input and an output. They'll need a
# loss or an objective function to optimize which will relate what we
# want the network to output for some given set of inputs. And
# they'll be trained with gradient descent and backprop.
#
#
# Basic RNN Cell
#
# The basic recurrent cell can be used in tensorflow as
# `tf.nn.rnn_cell.BasicRNNCell`. Though for most complex sequences,
# especially longer sequences, this is almost never a good idea. That
# is because the basic RNN cell does not do very well as time goes
# on. To understand why this is, we'll have to learn a bit more about
# how backprop works. When we perform backprop, we're multiplying
# gradients from the output back to the input. As the network gets
# deeper, there are more multiplications along the way from the
# output to the input.
#
# Same for recurrent networks. Remember, they're just like a normal
# feedforward network with each new timestep creating a new layer. So
# if we're creating an infinitely deep network, what will happen to
# all our multiplications? Well if the derivatives are all greater
# than 1, then they will very quickly grow to infinity. And if they
# are less than 1, then they will very quickly grow to 0. That makes
# them very difficult to train in practice. The problem is known in
# the literature as the exploding or vanishing gradient problem.
# Luckily, we don't have to figure out how to solve it, because some
# very clever people have already come up with a solution, in 1997!
# Yea, what were you doing in 1997? Probably not coming up with what
# they called the long short-term memory, or LSTM.
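#
# A quick back-of-the-envelope illustration of why all of that
# multiplication is a problem: gradients repeatedly scaled by a factor
# slightly below or above 1 vanish or explode very quickly.
print("0.9 ** 100 = ", 0.9 ** 100)   # ~2.7e-05 -- vanishing
print("1.1 ** 100 = ", 1.1 ** 100)   # ~13780.6 -- exploding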
#
#
# LSTM RNN Cell
#
# The mechanics of this are unfortunately far beyond the scope of this
# course, but put simply, it uses a combination of gating cells to
# control its contents and by having gates, it is able to block the
# flow of the gradient, avoiding too many multiplications during
# backprop. For more details, I highly recommend reading:
# https://colah.github.io/posts/2015-08-Understanding-LSTMs/.
#
# In tensorflow, we can make use of this cell using
# `tf.nn.rnn_cell.LSTMCell`.
#
#
# GRU RNN Cell
#
# One last cell type is worth mentioning, the gated recurrent unit,
# or GRU. Again, beyond the scope of this class. Just think of it as
# a simplified version of the LSTM with 2 gates instead of 4, though
# that is not an accurate description. In Tensorflow we can use this
# with `tf.nn.rnn_cell.GRUCell`.
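#
# Swapping between these cell types is typically a one-line change,
# e.g. (none of these lines are used below; shown only for reference):
# cell = tf.nn.rnn_cell.BasicRNNCell(num_units=256)
# cell = tf.nn.rnn_cell.LSTMCell(num_units=256)
# cell = tf.nn.rnn_cell.GRUCell(num_units=256)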
#
##
## Character Language Model
##
# We'll now try a fun application of recurrent networks where we try
# to model a corpus of text, one character at a time. The basic idea
# is to take one character at a time and try to predict the next
# character in sequence. Given enough sequences, the model is capable
# of generating entirely new sequences all on its own.
#
#
# Setting up the Data
#
# For data, we're going to start with text. You can basically take
# any text file that is sufficiently long, as we'll need a lot of it,
# and try to use this. This website seems like an interesting place
# to begin: http://textfiles.com/directory.html and Project
# Gutenberg https://www.gutenberg.org/browse/scores/top.
# http://prize.hutter1.net/ also has a 50k euro reward for
# compressing wikipedia. Let's try with Alice's Adventures in
# Wonderland by Lewis Carroll:
print("Reading text file...")
#from six.moves import urllib
#f, _ = urllib.request.urlretrieve('https://www.gutenberg.org/cache/epub/11/pg11.txt', 'alice.txt')
f="alice2.txt"
with open(f, 'r') as fp:
txt = fp.read()
# And let's find out what's inside this text file by creating a set
# of all possible characters.
vocab = list(set(txt))
print ("txt: ", len(txt), " vocab: ", len(vocab))
# Great so we now have about 164 thousand characters and 85 unique
# characters in our vocabulary which we can use to help us train a
# model of language. Rather than use the characters, we'll convert
# each character to a unique integer. We'll later see that when we
# work with words, we can achieve a similar goal using a very popular
# model called word2vec:
# https://www.tensorflow.org/versions/r0.9/tutorials/word2vec/index.html
#
# We'll first create a look up table which will map a character to an
# integer:
print("Creating encoder...")
encoder = dict(zip(vocab, range(len(vocab))))
print("Creating decoder...")
decoder = dict(zip(range(len(vocab)), vocab))
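# As a quick sanity check, encoding and then decoding a short string
# should round-trip it (assuming all of its characters appear in the
# text):
example_str = "Alice"
example_ids = [encoder[ch] for ch in example_str]
print("encoded: ", example_ids)
print("decoded: ", "".join(decoder[i] for i in example_ids))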
#
# Creating the Model
#
# For our model, we'll need to define a few parameters.
# Number of sequences in a mini batch
batch_size = 100
# Number of characters in a sequence
sequence_length = 100
# Number of cells in our LSTM layer
n_cells = 256
# Number of LSTM layers
n_layers = 2
# Total number of characters in the one-hot encoding
n_chars = len(vocab)
# Now create the input and output to the network. Rather than having
# `batch size` x `number of features`; or `batch size` x `height` x
# `width` x `channels`; we're going to have `batch size` x `sequence
# length`.
X = tf.placeholder(tf.int32, [None, sequence_length], name='X')
# We'll have a placeholder for our true outputs
Y = tf.placeholder(tf.int32, [None, sequence_length], name='Y')
# Now remember with MNIST that we used a one-hot vector
# representation of our numbers. We could transform our input data
# into such a representation. But instead, we'll use
# `tf.nn.embedding_lookup` so that we don't need to compute the
# encoded vector. Let's see how this works:
# we first create a variable to take us from our one-hot
# representation to our LSTM cells
embedding = tf.get_variable("embedding", [n_chars, n_cells])
# And then use tensorflow's embedding lookup to look up the ids in X
Xs = tf.nn.embedding_lookup(embedding, X)
# The resulting lookups are concatenated into a dense tensor
print("Xs.get_shape: ", Xs.get_shape().as_list())
# To create a recurrent network, we're going to need to slice our
# sequences into individual inputs. That will give us timestep lists
# which are each `batch_size` x `input_size`. Each character will
# then be connected to a recurrent layer composed of `n_cells` LSTM
# units.
# Let's create a name scope for the operations to clean things up in
# our graph
with tf.name_scope('reslice'):
Xs = [tf.squeeze(seq, [1])
for seq in tf.split(1, sequence_length, Xs)]
# Now we'll create our recurrent layer composed of LSTM cells.
cells = tf.nn.rnn_cell.BasicLSTMCell(num_units=n_cells, state_is_tuple=True)
# We'll initialize our LSTMs using the convenience method provided by
# tensorflow. We could explicitly define the batch size here or use
# the `tf.shape` method to compute it based on whatever `X` is,
# letting us feed in different sizes into the graph.
initial_state = cells.zero_state(tf.shape(X)[0], tf.float32)
# Great now we have a layer of recurrent cells and a way to
# initialize them. If we wanted to make this a multi-layer recurrent
# network, we could use the `MultiRNNCell` like so:
if n_layers > 1:
cells = tf.nn.rnn_cell.MultiRNNCell(
[cells] * n_layers, state_is_tuple=True)
initial_state = cells.zero_state(tf.shape(X)[0], tf.float32)
# In either case, the cells are composed of their outputs as
# modulated by the LSTM's output gate, and whatever is currently
# stored in its memory contents. Now let's connect our input to it.
# this will return us a list of outputs of every element in our
# sequence.
# Each output is `batch_size` x `n_cells` of output.
# It will also return the state as a tuple of the cells' memory and
# their output, to connect to the next time we use the recurrent
# layer.
outputs, state = tf.nn.rnn(cells, Xs, initial_state=initial_state)
# We'll now stack all our outputs for every cell
outputs_flat = tf.reshape(tf.concat(1, outputs), [-1, n_cells])
# For our output, we'll simply try to predict the very next timestep.
# So if our input sequence was "networ", our output sequence should
# be: "etwork". This will give us the same batch size coming out, and
# the same number of elements as our input sequence.
with tf.variable_scope('prediction'):
W = tf.get_variable(
"W",
shape=[n_cells, n_chars],
initializer=tf.random_normal_initializer(stddev=0.1))
b = tf.get_variable(
"b",
shape=[n_chars],
initializer=tf.random_normal_initializer(stddev=0.1))
# Find the output prediction of every single character in our minibatch
# we denote the pre-activation prediction, logits.
logits = tf.matmul(outputs_flat, W) + b
# We get the probabilistic version by calculating the softmax of this
probs = tf.nn.softmax(logits)
print("probs: ", probs)
# And then we can find the index of maximum probability
#Y_pred = tf.argmax(probs)
Y_pred = tf.argmax(probs, 1)
#
# Loss
#
# Our loss function will take the reshaped predictions and targets,
# and compute the softmax cross entropy.
with tf.variable_scope('loss'):
# Compute mean cross entropy loss for each output.
Y_true_flat = tf.reshape(tf.concat(1, Y), [-1])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, Y_true_flat)
mean_loss = tf.reduce_mean(loss)
#
# Clipping the Gradient
#
# Normally, we would just create an optimizer, give it a learning
# rate, and tell it to minimize our loss. But with recurrent networks,
# we can help out a bit by telling it to clip gradients. That helps
# with the exploding gradient problem, ensuring they can't get any
# bigger than the value we tell it. We can do that in tensorflow by
# iterating over every gradient and variable, and changing their
# value before we apply their update to every trainable variable.
with tf.name_scope('optimizer'):
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
gradients = []
clip = tf.constant(5.0, name="clip")
for grad, var in optimizer.compute_gradients(mean_loss):
gradients.append((tf.clip_by_value(grad, -clip, clip), var))
updates = optimizer.apply_gradients(gradients)
# We could also explore other methods of clipping the gradient based
# on a percentile of the norm of activations or other similar
# methods, like when we explored deep dream regularization. But the
# LSTM has been built to help regularize the network through its own
# gating mechanisms, so this may not be the best idea for your
# problem. Really, the only way to know is to try different
# approaches and see how it affects the output on your problem.
#
#
# Training
#
sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)
cursor = 0
it_i = 0
print("Begin training...")
while True:
print("it_i: ", it_i, end="")
Xs, Ys = [], []
for batch_i in range(batch_size):
if (cursor + sequence_length) >= len(txt) - sequence_length - 1:
cursor = 0
Xs.append([encoder[ch]
for ch in txt[cursor:cursor + sequence_length]])
Ys.append([encoder[ch]
for ch in txt[cursor + 1: cursor + sequence_length + 1]])
cursor = (cursor + sequence_length)
Xs = np.array(Xs).astype(np.int32)
Ys = np.array(Ys).astype(np.int32)
loss_val, _ = sess.run([mean_loss, updates],
feed_dict={X: Xs, Y: Ys})
print(" loss_val: ", loss_val)
if it_i % 500 == 0:
        # another line with an error? where did it come from? it's not in upstream...
#p = np.argmax(sess.run([Y_pred], feed_dict={X: Xs})[0], axis=1)
p = sess.run([Y_pred], feed_dict={X: Xs})[0]
preds = [decoder[p_i] for p_i in p]
print("".join(preds).split('\n'))
it_i += 1
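#
# Once you stop the training loop above (e.g. by interrupting it), you
# could sample brand new text from the model. A minimal sketch of how
# that might be done (an assumption, not part of the original script):
# prime with a window of text, feed the last `sequence_length`
# characters, and sample the next character from the predicted
# distribution of the final timestep.
#
# prime = txt[:sequence_length]
# generated = list(prime)
# for _ in range(500):
#     window = generated[-sequence_length:]
#     x = np.array([[encoder[ch] for ch in window]], dtype=np.int32)
#     p = sess.run(probs, feed_dict={X: x})      # [sequence_length, n_chars]
#     dist = p[-1] / p[-1].sum()                 # renormalize for sampling
#     next_i = np.random.choice(n_chars, p=dist)
#     generated.append(decoder[next_i])
# print("".join(generated))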
#
# Extensions
#
# There are also certainly a lot of additions we can add to speed up
# or help with training including adding dropout or using batch
# normalization that I haven't gone into here. Also when dealing with
# variable length sequences, you may want to consider using a special
# token to denote the last character or element in your sequence.
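#
# For example, dropout between recurrent layers is usually added by
# wrapping the cell (a sketch only, not used above):
# cells = tf.nn.rnn_cell.DropoutWrapper(cells, output_keep_prob=0.5)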
#
# As for applications, *completely endless*. And I think that is
# really what makes this field so exciting right now. There doesn't
# seem to be any limit to what is possible. First of all, you are not
# just limited to text. You may want to feed in MIDI
# data to create a piece of algorithmic music. I've tried it with raw
# sound data and this even works, but it requires a lot of memory and
# at least 30k iterations to run before it sounds like anything. Or
# perhaps you might try some other unexpected text based information,
# such as encodings of image data like JPEG in base64. Or other
# compressed data formats. Or perhaps you are more adventurous and
# want to try using what you've learned here with the previous
# sessions to add recurrent layers to a traditional convolutional
# model.
#
##
## Future
##
# If you're still here, then I'm really excited for you and to see
# what you'll create. By now, you've seen most of the major building
# blocks with neural networks. From here, you are only limited by the
# time it takes to train all of the interesting ideas you'll have.
# But there is still so much more to discover, and it's very likely
# that this entire course is already out of date, because this field
# just moves incredibly fast. In any case, the applications of these
# techniques are still fairly nascent, so if you're here to see how
# your creative practice could grow with these techniques, then you
# should already have plenty to discover.
#
# I'm very excited about how the field is moving. Often, it is very
# hard to find labels for a lot of data in a meaningful and
# consistent way. But there is a lot of interesting stuff starting to
# emerge in the unsupervised models. Those are the models that just
# take data in, and the computer reasons about it. And even more
# interesting is the combination of general purpose learning
# algorithms. That's really where reinforcement learning is starting
# to shine. But that's for another course, perhaps.
#
##
## Reading
##
# Ian J. Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David
# Warde-Farley, Sherjil Ozair, Aaron Courville, Yoshua Bengio.
# Generative Adversarial Networks. 2014.
# https://arxiv.org/abs/1406.2661
#
# Ian J. Goodfellow, Jonathon Shlens, Christian Szegedy. Explaining
# and Harnessing Adversarial Examples. 2014.
#
# Alec Radford, Luke Metz, Soumith Chintala. Unsupervised
# Representation Learning with Deep Convolutional Generative
# Adversarial Networks. 2015.
# https://arxiv.org/abs/1511.06434
#
# Emily Denton, Soumith Chintala, Arthur Szlam, Rob Fergus.
# Deep Generative Image Models using a Laplacian Pyramid of
# Adversarial Networks. 2015.
# arxiv.org/abs/1506.05751
#
# Anders Boesen Lindbo Larsen, Søren Kaae Sønderby, Hugo Larochelle,
# Ole Winther. Autoencoding beyond pixels using a learned similarity
# metric. 2015.
# https://arxiv.org/abs/1512.09300
#
# Vincent Dumoulin, Ishmael Belghazi, Ben Poole, Alex Lamb, Martin
# Arjovsky, Olivier Mastropietro, Aaron Courville. Adversarially
# Learned Inference. 2016.
# https://arxiv.org/abs/1606.00704
#
# Ilya Sutskever, James Martens, and Geoffrey Hinton. Generating Text
# with Recurrent Neural Networks, ICML 2011.
#
# A. Graves. Generating sequences with recurrent neural networks. In
# Arxiv preprint, arXiv:1308.0850, 2013.
#
# T. Mikolov, I. Sutskever, K. Chen, G. S. Corrado, and J. Dean.
# Distributed representations of words and phrases and their
# compositionality. In Advances in
# Neural Information Processing Systems, pages 3111–3119, 2013.
#
# J. Pennington, R. Socher, and C. D. Manning. Glove: Global vectors
# for word representation. Proceedings of the Empiricial Methods in
# Natural Language Processing (EMNLP 2014), 12, 2014.
#
# Yoon Kim, Yacine Jernite, David Sontag, Alexander M. Rush.
# Character-Aware Neural Language Models. 2015.
# https://arxiv.org/abs/1508.06615
#
# I. Sutskever, J. Martens, and G. Hinton. Generating text with
# recurrent neural networks. In L. Getoor and T. Scheffer, editors,
# Proceedings of the 28th International Conference on Machine
# Learning (ICML-11), ICML ’11, pages 1017–1024, New York, NY, USA,
# June 2011. ACM.
#
# eop
|
dariox2/CADL
|
session-5/l5b01.py
|
Python
|
apache-2.0
| 27,030
|
[
"exciting"
] |
a96cb87d653d97f7a336d31e40348108c601e5ec75c8e3a46dce45a74ae68299
|
"""
Contains functionality needed in every web interface
"""
import logging
import operator
import os
import re
from gettext import gettext
import pkg_resources
pkg_resources.require("SQLAlchemy >= 0.4")
from sqlalchemy import func, and_, select
from paste.httpexceptions import HTTPBadRequest, HTTPInternalServerError
from paste.httpexceptions import HTTPNotImplemented, HTTPRequestRangeNotSatisfiable
from galaxy import exceptions
from galaxy.exceptions import ItemAccessibilityException, ItemDeletionException, ItemOwnershipException
from galaxy.exceptions import MessageException
from galaxy import web
from galaxy import model
from galaxy import security
from galaxy import util
from galaxy import objectstore
from galaxy.web import error, url_for
from galaxy.web.form_builder import AddressField, CheckboxField, SelectField, TextArea, TextField
from galaxy.web.form_builder import build_select_field, HistoryField, PasswordField, WorkflowField, WorkflowMappingField
from galaxy.workflow.modules import module_factory, WorkflowModuleInjector, MissingToolException
from galaxy.model.orm import eagerload, eagerload_all, desc, not_
from galaxy.security.validate_user_input import validate_publicname
from galaxy.util.sanitize_html import sanitize_html
from galaxy.model.item_attrs import Dictifiable, UsesAnnotations
from galaxy.datatypes.interval import ChromatinInteractions
from galaxy.datatypes.data import Text
from galaxy.model import ExtendedMetadata, ExtendedMetadataIndex, LibraryDatasetDatasetAssociation, HistoryDatasetAssociation
from galaxy.managers import api_keys
from galaxy.managers import tags
from galaxy.managers import base as managers_base
from galaxy.datatypes.metadata import FileParameter
from galaxy.tools.parameters import visit_input_values
from galaxy.tools.parameters.basic import DataToolParameter
from galaxy.tools.parameters.basic import DataCollectionToolParameter
from galaxy.util.json import dumps
from galaxy.workflow.modules import ToolModule
from galaxy.workflow.steps import attach_ordered_steps
from galaxy.util import validation
log = logging.getLogger( __name__ )
# States for passing messages
SUCCESS, INFO, WARNING, ERROR = "done", "info", "warning", "error"
def _is_valid_slug( slug ):
""" Returns true if slug is valid. """
VALID_SLUG_RE = re.compile( "^[a-z0-9\-]+$" )
return VALID_SLUG_RE.match( slug )
class BaseController( object ):
"""
Base class for Galaxy web application controllers.
"""
def __init__( self, app ):
"""Initialize an interface for application 'app'"""
self.app = app
self.sa_session = app.model.context
def get_toolbox(self):
"""Returns the application toolbox"""
return self.app.toolbox
def get_class( self, class_name ):
""" Returns the class object that a string denotes. Without this method, we'd have to do eval(<class_name>). """
return managers_base.get_class( class_name )
def get_object( self, trans, id, class_name, check_ownership=False, check_accessible=False, deleted=None ):
"""
Convenience method to get a model object with the specified checks.
"""
return managers_base.get_object( trans, id, class_name, check_ownership=check_ownership, check_accessible=check_accessible, deleted=deleted )
# this should be here - but catching errors from sharable item controllers that *should* have SharableItemMixin
# but *don't* then becomes difficult
#def security_check( self, trans, item, check_ownership=False, check_accessible=False ):
# log.warn( 'BaseController.security_check: %s, %b, %b', str( item ), check_ownership, check_accessible )
# # meant to be overridden in SharableSecurityMixin
# return item
def get_user( self, trans, id, check_ownership=False, check_accessible=False, deleted=None ):
return self.get_object( trans, id, 'User', check_ownership=False, check_accessible=False, deleted=deleted )
def get_group( self, trans, id, check_ownership=False, check_accessible=False, deleted=None ):
return self.get_object( trans, id, 'Group', check_ownership=False, check_accessible=False, deleted=deleted )
def get_role( self, trans, id, check_ownership=False, check_accessible=False, deleted=None ):
return self.get_object( trans, id, 'Role', check_ownership=False, check_accessible=False, deleted=deleted )
def encode_all_ids( self, trans, rval, recursive=False ):
"""
Encodes all integer values in the dict rval whose keys are 'id' or end with '_id'
It might be useful to turn this in to a decorator
"""
return trans.security.encode_all_ids( rval, recursive=recursive )
# incoming param validation
# should probably be in sep. serializer class/object _used_ by controller
def validate_and_sanitize_basestring( self, key, val ):
return validation.validate_and_sanitize_basestring( key, val )
def validate_and_sanitize_basestring_list( self, key, val ):
return validation.validate_and_sanitize_basestring_list( key, val )
def validate_boolean( self, key, val ):
return validation.validate_boolean( key, val )
Root = BaseController
class BaseUIController( BaseController ):
def get_object( self, trans, id, class_name, check_ownership=False, check_accessible=False, deleted=None ):
try:
return BaseController.get_object( self, trans, id, class_name,
check_ownership=check_ownership, check_accessible=check_accessible, deleted=deleted )
except MessageException:
raise # handled in the caller
except:
log.exception( "Exception in get_object check for %s %s:" % ( class_name, str( id ) ) )
raise Exception( 'Server error retrieving %s id ( %s ).' % ( class_name, str( id ) ) )
class BaseAPIController( BaseController ):
def get_object( self, trans, id, class_name, check_ownership=False, check_accessible=False, deleted=None ):
try:
return BaseController.get_object( self, trans, id, class_name,
check_ownership=check_ownership, check_accessible=check_accessible, deleted=deleted )
except ItemDeletionException, e:
raise HTTPBadRequest( detail="Invalid %s id ( %s ) specified: %s" % ( class_name, str( id ), str( e ) ) )
except MessageException, e:
raise HTTPBadRequest( detail=e.err_msg )
except Exception, e:
log.exception( "Exception in get_object check for %s %s." % ( class_name, str( id ) ) )
raise HTTPInternalServerError( comment=str( e ) )
def validate_in_users_and_groups( self, trans, payload ):
"""
For convenience, in_users and in_groups can be encoded IDs or emails/group names in the API.
"""
def get_id( item, model_class, column ):
try:
return trans.security.decode_id( item )
except:
pass # maybe an email/group name
# this will raise if the item is invalid
return trans.sa_session.query( model_class ).filter( column == item ).first().id
new_in_users = []
new_in_groups = []
invalid = []
for item in util.listify( payload.get( 'in_users', [] ) ):
try:
new_in_users.append( get_id( item, trans.app.model.User, trans.app.model.User.table.c.email ) )
except:
invalid.append( item )
for item in util.listify( payload.get( 'in_groups', [] ) ):
try:
new_in_groups.append( get_id( item, trans.app.model.Group, trans.app.model.Group.table.c.name ) )
except:
invalid.append( item )
if invalid:
msg = "The following value(s) for associated users and/or groups could not be parsed: %s." % ', '.join( invalid )
msg += " Valid values are email addresses of users, names of groups, or IDs of both."
raise Exception( msg )
payload['in_users'] = map( str, new_in_users )
payload['in_groups'] = map( str, new_in_groups )
def not_implemented( self, trans, **kwd ):
raise HTTPNotImplemented()
class Datatype( object ):
"""Used for storing in-memory list of datatypes currently in the datatypes registry."""
def __init__( self, extension, dtype, type_extension, mimetype, display_in_upload ):
self.extension = extension
self.dtype = dtype
self.type_extension = type_extension
self.mimetype = mimetype
self.display_in_upload = display_in_upload
#
# -- Mixins for working with Galaxy objects. --
#
class CreatesUsersMixin:
"""
Mixin centralizing logic for user creation between web and API controller.
    Web controller handles additional features such as e-mail subscription, activation,
user forms, etc.... API created users are much more vanilla for the time being.
"""
def create_user( self, trans, email, username, password ):
user = trans.app.model.User( email=email )
user.set_password_cleartext( password )
user.username = username
if trans.app.config.user_activation_on:
user.active = False
else:
user.active = True # Activation is off, every new user is active by default.
trans.sa_session.add( user )
trans.sa_session.flush()
trans.app.security_agent.create_private_user_role( user )
if trans.webapp.name == 'galaxy':
# We set default user permissions, before we log in and set the default history permissions
trans.app.security_agent.user_set_default_permissions( user,
default_access_private=trans.app.config.new_user_dataset_access_role_default_private )
return user
class CreatesApiKeysMixin:
"""
    Mixin centralizing logic for creating API keys for user objects.
Deprecated - please use api_keys.ApiKeyManager for new development.
"""
def create_api_key( self, trans, user ):
return api_keys.ApiKeyManager( trans.app ).create_api_key( user )
class SharableItemSecurityMixin:
""" Mixin for handling security for sharable items. """
def security_check( self, trans, item, check_ownership=False, check_accessible=False ):
""" Security checks for an item: checks if (a) user owns item or (b) item is accessible to user. """
return managers_base.security_check( trans, item, check_ownership=check_ownership, check_accessible=check_accessible )
class UsesHistoryMixin( SharableItemSecurityMixin ):
""" Mixin for controllers that use History objects. """
def get_history( self, trans, id, check_ownership=True, check_accessible=False, deleted=None ):
"""
Get a History from the database by id, verifying ownership.
"""
history = self.get_object( trans, id, 'History',
check_ownership=check_ownership, check_accessible=check_accessible, deleted=deleted )
history = self.security_check( trans, history, check_ownership, check_accessible )
return history
def get_user_histories( self, trans, user=None, include_deleted=False, only_deleted=False ):
"""
Get all the histories for a given user (defaulting to `trans.user`)
ordered by update time and filtered on whether they've been deleted.
"""
# handle default and/or anonymous user (which still may not have a history yet)
user = user or trans.user
if not user:
current_history = trans.get_history()
return [ current_history ] if current_history else []
history_model = trans.model.History
query = ( trans.sa_session.query( history_model )
.filter( history_model.user == user )
.order_by( desc( history_model.table.c.update_time ) ) )
if only_deleted:
query = query.filter( history_model.deleted == True )
elif not include_deleted:
query = query.filter( history_model.deleted == False )
return query.all()
def get_history_datasets( self, trans, history, show_deleted=False, show_hidden=False, show_purged=False ):
""" Returns history's datasets. """
query = trans.sa_session.query( trans.model.HistoryDatasetAssociation ) \
.filter( trans.model.HistoryDatasetAssociation.history == history ) \
.options( eagerload( "children" ) ) \
.join( "dataset" ) \
.options( eagerload_all( "dataset.actions" ) ) \
.order_by( trans.model.HistoryDatasetAssociation.hid )
if not show_deleted:
query = query.filter( trans.model.HistoryDatasetAssociation.deleted == False )
if not show_purged:
query = query.filter( trans.model.Dataset.purged == False )
return query.all()
def get_hda_state_counts( self, trans, history, include_deleted=False, include_hidden=False ):
"""
Returns a dictionary with state counts for history's HDAs. Key is a
        dataset state, value is the number of datasets in that state.
"""
# Build query to get (state, count) pairs.
cols_to_select = [ trans.app.model.Dataset.table.c.state, func.count( '*' ) ]
from_obj = trans.app.model.HistoryDatasetAssociation.table.join( trans.app.model.Dataset.table )
conditions = [ trans.app.model.HistoryDatasetAssociation.table.c.history_id == history.id ]
if not include_deleted:
# Only count datasets that have not been deleted.
conditions.append( trans.app.model.HistoryDatasetAssociation.table.c.deleted == False )
if not include_hidden:
# Only count datasets that are visible.
conditions.append( trans.app.model.HistoryDatasetAssociation.table.c.visible == True )
group_by = trans.app.model.Dataset.table.c.state
query = select( columns=cols_to_select,
from_obj=from_obj,
whereclause=and_( *conditions ),
group_by=group_by )
# Initialize count dict with all states.
state_count_dict = {}
for k, state in trans.app.model.Dataset.states.items():
state_count_dict[ state ] = 0
# Process query results, adding to count dict.
for row in trans.sa_session.execute( query ):
state, count = row
state_count_dict[ state ] = count
return state_count_dict
def get_hda_summary_dicts( self, trans, history ):
"""Returns a list of dictionaries containing summary information
for each HDA in the given history.
"""
hda_model = trans.model.HistoryDatasetAssociation
# get state, name, etc.
columns = ( hda_model.name, hda_model.hid, hda_model.id, hda_model.deleted,
trans.model.Dataset.state )
column_keys = [ "name", "hid", "id", "deleted", "state" ]
query = ( trans.sa_session.query( *columns )
.enable_eagerloads( False )
.filter( hda_model.history == history )
.join( trans.model.Dataset )
.order_by( hda_model.hid ) )
# build dictionaries, adding history id and encoding all ids
hda_dicts = []
for hda_tuple in query.all():
hda_dict = dict( zip( column_keys, hda_tuple ) )
hda_dict[ 'history_id' ] = history.id
trans.security.encode_dict_ids( hda_dict )
hda_dicts.append( hda_dict )
return hda_dicts
def _get_hda_state_summaries( self, trans, hda_dict_list ):
"""Returns two dictionaries (in a tuple): state_counts and state_ids.
Each is keyed according to the possible hda states:
_counts contains a sum of the datasets in each state
_ids contains a list of the encoded ids for each hda in that state
hda_dict_list should be a list of hda data in dictionary form.
"""
#TODO: doc to rst
# init counts, ids for each state
state_counts = {}
state_ids = {}
for key, state in trans.app.model.Dataset.states.items():
state_counts[ state ] = 0
state_ids[ state ] = []
for hda_dict in hda_dict_list:
item_state = hda_dict['state']
if not hda_dict['deleted']:
state_counts[ item_state ] = state_counts[ item_state ] + 1
# needs to return all ids (no deleted check)
state_ids[ item_state ].append( hda_dict['id'] )
return ( state_counts, state_ids )
def _get_history_state_from_hdas( self, trans, history, hda_state_counts ):
"""Returns the history state based on the states of the HDAs it contains.
"""
states = trans.app.model.Dataset.states
num_hdas = sum( hda_state_counts.values() )
# (default to ERROR)
state = states.ERROR
if num_hdas == 0:
state = states.NEW
else:
if( ( hda_state_counts[ states.RUNNING ] > 0 )
or ( hda_state_counts[ states.SETTING_METADATA ] > 0 )
or ( hda_state_counts[ states.UPLOAD ] > 0 ) ):
state = states.RUNNING
elif hda_state_counts[ states.QUEUED ] > 0:
state = states.QUEUED
elif( ( hda_state_counts[ states.ERROR ] > 0 )
or ( hda_state_counts[ states.FAILED_METADATA ] > 0 ) ):
state = states.ERROR
elif hda_state_counts[ states.OK ] == num_hdas:
state = states.OK
return state
def get_history_dict( self, trans, history, hda_dictionaries=None ):
"""Returns history data in the form of a dictionary.
"""
history_dict = history.to_dict( view='element', value_mapper={ 'id':trans.security.encode_id })
history_dict[ 'user_id' ] = None
if history.user_id:
history_dict[ 'user_id' ] = trans.security.encode_id( history.user_id )
history_dict[ 'nice_size' ] = history.get_disk_size( nice_size=True )
history_dict[ 'annotation' ] = history.get_item_annotation_str( trans.sa_session, trans.user, history )
if not history_dict[ 'annotation' ]:
history_dict[ 'annotation' ] = ''
#TODO: item_slug url
if history_dict[ 'importable' ] and history_dict[ 'slug' ]:
#TODO: this should be in History (or a superclass of)
username_and_slug = ( '/' ).join(( 'u', history.user.username, 'h', history_dict[ 'slug' ] ))
history_dict[ 'username_and_slug' ] = username_and_slug
hda_summaries = hda_dictionaries if hda_dictionaries else self.get_hda_summary_dicts( trans, history )
#TODO remove the following in v2
( state_counts, state_ids ) = self._get_hda_state_summaries( trans, hda_summaries )
history_dict[ 'state_details' ] = state_counts
history_dict[ 'state_ids' ] = state_ids
history_dict[ 'state' ] = self._get_history_state_from_hdas( trans, history, state_counts )
return history_dict
def set_history_from_dict( self, trans, history, new_data ):
"""
Changes history data using the given dictionary new_data.
"""
#precondition: ownership of the history has already been checked
#precondition: user is not None (many of these attributes require a user to set properly)
user = trans.get_user()
# published histories should always be importable
if 'published' in new_data and new_data[ 'published' ] and not history.importable:
new_data[ 'importable' ] = True
# send what we can down into the model
changed = history.set_from_dict( new_data )
# the rest (often involving the trans) - do here
#TODO: the next two could be an aspect/mixin
#TODO: also need a way to check whether they've changed - assume they have for now
if 'annotation' in new_data:
history.add_item_annotation( trans.sa_session, user, history, new_data[ 'annotation' ] )
changed[ 'annotation' ] = new_data[ 'annotation' ]
if 'tags' in new_data:
self.set_tags_from_list( trans, history, new_data[ 'tags' ], user=user )
changed[ 'tags' ] = new_data[ 'tags' ]
#TODO: sharing with user/permissions?
if changed.keys():
trans.sa_session.flush()
# create a slug if none exists (setting importable to false should not remove the slug)
if 'importable' in changed and changed[ 'importable' ] and not history.slug:
self._create_history_slug( trans, history )
return changed
def _create_history_slug( self, trans, history ):
#TODO: mixins need to die a quick, horrible death
# (this is duplicate from SharableMixin which can't be added to UsesHistory without exposing various urls)
cur_slug = history.slug
# Setup slug base.
if cur_slug is None or cur_slug == "":
# Item can have either a name or a title.
item_name = history.name
slug_base = util.ready_name_for_url( item_name.lower() )
else:
slug_base = cur_slug
# Using slug base, find a slug that is not taken. If slug is taken,
# add integer to end.
new_slug = slug_base
count = 1
while ( trans.sa_session.query( trans.app.model.History )
.filter_by( user=history.user, slug=new_slug, importable=True )
.count() != 0 ):
# Slug taken; choose a new slug based on count. This approach can
# handle numerous items with the same name gracefully.
new_slug = '%s-%i' % ( slug_base, count )
count += 1
# Set slug and return.
trans.sa_session.add( history )
history.slug = new_slug
trans.sa_session.flush()
return history.slug == cur_slug
class ExportsHistoryMixin:
def serve_ready_history_export( self, trans, jeha ):
assert jeha.ready
if jeha.compressed:
trans.response.set_content_type( 'application/x-gzip' )
else:
trans.response.set_content_type( 'application/x-tar' )
disposition = 'attachment; filename="%s"' % jeha.export_name
trans.response.headers["Content-Disposition"] = disposition
return open( trans.app.object_store.get_filename( jeha.dataset ) )
def queue_history_export( self, trans, history, gzip=True, include_hidden=False, include_deleted=False ):
# Convert options to booleans.
#
if isinstance( gzip, basestring ):
gzip = ( gzip in [ 'True', 'true', 'T', 't' ] )
if isinstance( include_hidden, basestring ):
include_hidden = ( include_hidden in [ 'True', 'true', 'T', 't' ] )
if isinstance( include_deleted, basestring ):
include_deleted = ( include_deleted in [ 'True', 'true', 'T', 't' ] )
# Run job to do export.
history_exp_tool = trans.app.toolbox.get_tool( '__EXPORT_HISTORY__' )
params = {
'history_to_export': history,
'compress': gzip,
'include_hidden': include_hidden,
'include_deleted': include_deleted
}
history_exp_tool.execute( trans, incoming=params, history=history, set_output_hid=True )
class ImportsHistoryMixin:
def queue_history_import( self, trans, archive_type, archive_source ):
# Run job to do import.
history_imp_tool = trans.app.toolbox.get_tool( '__IMPORT_HISTORY__' )
incoming = { '__ARCHIVE_SOURCE__' : archive_source, '__ARCHIVE_TYPE__' : archive_type }
history_imp_tool.execute( trans, incoming=incoming )
class UsesHistoryDatasetAssociationMixin:
"""
Mixin for controllers that use HistoryDatasetAssociation objects.
"""
def get_dataset( self, trans, dataset_id, check_ownership=True, check_accessible=False, check_state=True ):
"""
Get an HDA object by id performing security checks using
the current transaction.
"""
try:
dataset_id = trans.security.decode_id( dataset_id )
except ( AttributeError, TypeError ):
# DEPRECATION: We still support unencoded ids for backward compatibility
try:
dataset_id = int( dataset_id )
except ValueError, v_err:
raise HTTPBadRequest( "Invalid dataset id: %s." % str( dataset_id ) )
try:
data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( int( dataset_id ) )
except:
raise HTTPRequestRangeNotSatisfiable( "Invalid dataset id: %s." % str( dataset_id ) )
if check_ownership:
# Verify ownership.
user = trans.get_user()
if not user:
error( "Must be logged in to manage Galaxy items" )
if data.history.user != user:
error( "%s is not owned by current user" % data.__class__.__name__ )
if check_accessible:
current_user_roles = trans.get_current_user_roles()
if not trans.app.security_agent.can_access_dataset( current_user_roles, data.dataset ):
error( "You are not allowed to access this dataset" )
if check_state and data.state == trans.model.Dataset.states.UPLOAD:
return trans.show_error_message( "Please wait until this dataset finishes uploading "
+ "before attempting to view it." )
return data
def get_history_dataset_association( self, trans, history, dataset_id,
check_ownership=True, check_accessible=False, check_state=False ):
"""
Get a HistoryDatasetAssociation from the database by id, verifying ownership.
"""
#TODO: duplicate of above? alias to above (or vis-versa)
self.security_check( trans, history, check_ownership=check_ownership, check_accessible=check_accessible )
hda = self.get_object( trans, dataset_id, 'HistoryDatasetAssociation',
check_ownership=False, check_accessible=False )
if check_accessible:
if( not trans.user_is_admin()
and not trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), hda.dataset ) ):
error( "You are not allowed to access this dataset" )
if check_state and hda.state == trans.model.Dataset.states.UPLOAD:
error( "Please wait until this dataset finishes uploading before attempting to view it." )
return hda
def get_history_dataset_association_from_ids( self, trans, id, history_id ):
# Just to echo other TODOs, there seems to be some overlap here, still
# this block appears multiple places (dataset show, history_contents
        # show, upcoming history job show) so I am consolidating it here.
# Someone smarter than me should determine if there is some redundancy here.
# for anon users:
#TODO: check login_required?
#TODO: this isn't actually most_recently_used (as defined in histories)
if( ( trans.user == None )
and ( history_id == trans.security.encode_id( trans.history.id ) ) ):
history = trans.history
#TODO: dataset/hda by id (from history) OR check_ownership for anon user
hda = self.get_history_dataset_association( trans, history, id,
check_ownership=False, check_accessible=True )
else:
#TODO: do we really need the history?
history = self.get_history( trans, history_id,
check_ownership=False, check_accessible=True, deleted=False )
hda = self.get_history_dataset_association( trans, history, id,
check_ownership=False, check_accessible=True )
return hda
def get_hda_list( self, trans, hda_ids, check_ownership=True, check_accessible=False, check_state=True ):
"""
Returns one or more datasets in a list.
If a dataset is not found or is inaccessible to trans.user,
add None in its place in the list.
"""
# precondtion: dataset_ids is a list of encoded id strings
hdas = []
for id in hda_ids:
hda = None
try:
hda = self.get_dataset( trans, id,
check_ownership=check_ownership,
check_accessible=check_accessible,
check_state=check_state )
except Exception, exception:
pass
hdas.append( hda )
return hdas
def get_data( self, dataset, preview=True ):
"""
Gets a dataset's data.
"""
# Get data from file, truncating if necessary.
truncated = False
dataset_data = None
if os.path.exists( dataset.file_name ):
if isinstance( dataset.datatype, Text ):
max_peek_size = 1000000 # 1 MB
if preview and os.stat( dataset.file_name ).st_size > max_peek_size:
dataset_data = open( dataset.file_name ).read(max_peek_size)
truncated = True
else:
dataset_data = open( dataset.file_name ).read(max_peek_size)
truncated = False
else:
# For now, cannot get data from non-text datasets.
dataset_data = None
return truncated, dataset_data
def check_dataset_state( self, trans, dataset ):
"""
Returns a message if dataset is not ready to be used in visualization.
"""
if not dataset:
return dataset.conversion_messages.NO_DATA
if dataset.state == trans.app.model.Job.states.ERROR:
return dataset.conversion_messages.ERROR
if dataset.state != trans.app.model.Job.states.OK:
return dataset.conversion_messages.PENDING
return None
def get_hda_dict( self, trans, hda ):
"""Return full details of this HDA in dictionary form.
"""
#precondition: the user's access to this hda has already been checked
#TODO:?? postcondition: all ids are encoded (is this really what we want at this level?)
expose_dataset_path = trans.user_is_admin() or trans.app.config.expose_dataset_path
hda_dict = hda.to_dict( view='element', expose_dataset_path=expose_dataset_path )
hda_dict[ 'api_type' ] = "file"
        # Add additional attributes that depend on trans and hence must be added here rather than at the model level.
can_access_hda = trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), hda.dataset )
can_access_hda = ( trans.user_is_admin() or can_access_hda )
if not can_access_hda:
return self.get_inaccessible_hda_dict( trans, hda )
hda_dict[ 'accessible' ] = True
#TODO: I'm unclear as to which access pattern is right
hda_dict[ 'annotation' ] = hda.get_item_annotation_str( trans.sa_session, trans.user, hda )
#annotation = getattr( hda, 'annotation', hda.get_item_annotation_str( trans.sa_session, trans.user, hda ) )
# ---- return here if deleted AND purged OR can't access
purged = ( hda.purged or hda.dataset.purged )
if ( hda.deleted and purged ):
#TODO: to_dict should really go AFTER this - only summary data
return trans.security.encode_dict_ids( hda_dict )
if expose_dataset_path:
try:
hda_dict[ 'file_name' ] = hda.file_name
except objectstore.ObjectNotFound:
log.exception( 'objectstore.ObjectNotFound, HDA %s.', hda.id )
hda_dict[ 'download_url' ] = url_for( 'history_contents_display',
history_id = trans.security.encode_id( hda.history.id ),
history_content_id = trans.security.encode_id( hda.id ) )
# resubmitted is not a real state
hda_dict[ 'resubmitted' ] = False
if hda.state == trans.app.model.Dataset.states.RESUBMITTED:
hda_dict[ 'state' ] = hda.dataset.state
hda_dict[ 'resubmitted' ] = True
        # indices, assoc. metadata files, etc.
meta_files = []
for meta_type in hda.metadata.spec.keys():
if isinstance( hda.metadata.spec[ meta_type ].param, FileParameter ):
meta_files.append( dict( file_type=meta_type ) )
if meta_files:
hda_dict[ 'meta_files' ] = meta_files
# currently, the viz reg is optional - handle on/off
if trans.app.visualizations_registry:
hda_dict[ 'visualizations' ] = trans.app.visualizations_registry.get_visualizations( trans, hda )
else:
hda_dict[ 'visualizations' ] = hda.get_visualizations()
#TODO: it may also be wiser to remove from here and add as API call that loads the visualizations
# when the visualizations button is clicked (instead of preloading/pre-checking)
# ---- return here if deleted
if hda.deleted and not purged:
return trans.security.encode_dict_ids( hda_dict )
return trans.security.encode_dict_ids( hda_dict )
def get_inaccessible_hda_dict( self, trans, hda ):
return trans.security.encode_dict_ids({
'id' : hda.id,
'history_id': hda.history.id,
'hid' : hda.hid,
'name' : hda.name,
'state' : hda.state,
'deleted' : hda.deleted,
'visible' : hda.visible,
'accessible': False
})
def get_hda_dict_with_error( self, trans, hda=None, history_id=None, id=None, error_msg='Error' ):
return trans.security.encode_dict_ids({
'id' : hda.id if hda else id,
'history_id': hda.history.id if hda else history_id,
'hid' : hda.hid if hda else '(unknown)',
'name' : hda.name if hda else '(unknown)',
'error' : error_msg,
'state' : trans.model.Dataset.states.NEW
})
def get_display_apps( self, trans, hda ):
display_apps = []
for display_app in hda.get_display_applications( trans ).itervalues():
app_links = []
for link_app in display_app.links.itervalues():
app_links.append({
'target': link_app.url.get( 'target_frame', '_blank' ),
'href' : link_app.get_display_url( hda, trans ),
'text' : gettext( link_app.name )
})
if app_links:
display_apps.append( dict( label=display_app.name, links=app_links ) )
return display_apps
def get_old_display_applications( self, trans, hda ):
display_apps = []
if not trans.app.config.enable_old_display_applications:
return display_apps
for display_app in hda.datatype.get_display_types():
target_frame, display_links = hda.datatype.get_display_links( hda,
display_app, trans.app, trans.request.base )
if len( display_links ) > 0:
display_label = hda.datatype.get_display_label( display_app )
app_links = []
for display_name, display_link in display_links:
app_links.append({
'target': target_frame,
'href' : display_link,
'text' : gettext( display_name )
})
if app_links:
display_apps.append( dict( label=display_label, links=app_links ) )
return display_apps
def set_hda_from_dict( self, trans, hda, new_data ):
"""
Changes HDA data using the given dictionary new_data.
"""
# precondition: access of the hda has already been checked
# send what we can down into the model
changed = hda.set_from_dict( new_data )
# the rest (often involving the trans) - do here
if 'annotation' in new_data.keys() and trans.get_user():
hda.add_item_annotation( trans.sa_session, trans.get_user(), hda, new_data[ 'annotation' ] )
changed[ 'annotation' ] = new_data[ 'annotation' ]
if 'tags' in new_data.keys() and trans.get_user():
self.set_tags_from_list( trans, hda, new_data[ 'tags' ], user=trans.user )
# sharing/permissions?
# purged
if changed.keys():
trans.sa_session.flush()
return changed
def get_hda_job( self, hda ):
# Get dataset's job.
job = None
for job_output_assoc in hda.creating_job_associations:
job = job_output_assoc.job
break
return job
def stop_hda_creating_job( self, hda ):
"""
Stops an HDA's creating job if all the job's other outputs are deleted.
"""
if hda.parent_id is None and len( hda.creating_job_associations ) > 0:
# Mark associated job for deletion
job = hda.creating_job_associations[0].job
if job.state in [ self.app.model.Job.states.QUEUED, self.app.model.Job.states.RUNNING, self.app.model.Job.states.NEW ]:
# Are *all* of the job's other output datasets deleted?
if job.check_if_output_datasets_deleted():
job.mark_deleted( self.app.config.track_jobs_in_database )
self.app.job_manager.job_stop_queue.put( job.id )
class UsesLibraryMixin:
def get_library( self, trans, id, check_ownership=False, check_accessible=True ):
l = self.get_object( trans, id, 'Library' )
if check_accessible and not ( trans.user_is_admin() or trans.app.security_agent.can_access_library( trans.get_current_user_roles(), l ) ):
error( "LibraryFolder is not accessible to the current user" )
return l
class UsesLibraryMixinItems( SharableItemSecurityMixin ):
def get_library_folder( self, trans, id, check_ownership=False, check_accessible=True ):
return self.get_object( trans, id, 'LibraryFolder',
check_ownership=False, check_accessible=check_accessible )
def get_library_dataset_dataset_association( self, trans, id, check_ownership=False, check_accessible=True ):
        # Deprecated in favor of galaxy.managers.lddas.LDDAManager.get() but not
# reusing that exactly because of subtle differences in exception handling
# logic (API controller override get_object to be slightly different).
return self.get_object( trans, id, 'LibraryDatasetDatasetAssociation',
check_ownership=False, check_accessible=check_accessible )
def get_library_dataset( self, trans, id, check_ownership=False, check_accessible=True ):
return self.get_object( trans, id, 'LibraryDataset',
check_ownership=False, check_accessible=check_accessible )
#TODO: it makes no sense that I can get roles from a user but not user.is_admin()
#def can_user_add_to_library_item( self, trans, user, item ):
# if not user: return False
# return ( ( user.is_admin() )
# or ( trans.app.security_agent.can_add_library_item( user.all_roles(), item ) ) )
def can_current_user_add_to_library_item( self, trans, item ):
if not trans.user: return False
return ( ( trans.user_is_admin() )
or ( trans.app.security_agent.can_add_library_item( trans.get_current_user_roles(), item ) ) )
def check_user_can_add_to_library_item( self, trans, item, check_accessible=True ):
"""
Raise exception if user cannot add to the specified library item (i.e.
Folder). Can set check_accessible to False if folder was loaded with
this check.
"""
if not trans.user:
return False
current_user_roles = trans.get_current_user_roles()
if trans.user_is_admin():
return True
if check_accessible:
if not trans.app.security_agent.can_access_library_item( current_user_roles, item, trans.user ):
raise ItemAccessibilityException( )
if not trans.app.security_agent.can_add_library_item( trans.get_current_user_roles(), item ):
# Slight misuse of ItemOwnershipException?
raise ItemOwnershipException( "User cannot add to library item." )
def copy_hda_to_library_folder( self, trans, hda, library_folder, roles=None, ldda_message='' ):
#PRECONDITION: permissions for this action on hda and library_folder have been checked
roles = roles or []
# this code was extracted from library_common.add_history_datasets_to_library
#TODO: refactor library_common.add_history_datasets_to_library to use this for each hda to copy
# create the new ldda and apply the folder perms to it
ldda = hda.to_library_dataset_dataset_association( trans, target_folder=library_folder,
roles=roles, ldda_message=ldda_message )
self._apply_library_folder_permissions_to_ldda( trans, library_folder, ldda )
self._apply_hda_permissions_to_ldda( trans, hda, ldda )
#TODO:?? not really clear on how permissions are being traded here
# seems like hda -> ldda permissions should be set in to_library_dataset_dataset_association
# then they get reset in _apply_library_folder_permissions_to_ldda
# then finally, re-applies hda -> ldda for missing actions in _apply_hda_permissions_to_ldda??
return ldda
def _apply_library_folder_permissions_to_ldda( self, trans, library_folder, ldda ):
"""
Copy actions/roles from library folder to an ldda (and its library_dataset).
"""
#PRECONDITION: permissions for this action on library_folder and ldda have been checked
security_agent = trans.app.security_agent
security_agent.copy_library_permissions( trans, library_folder, ldda )
security_agent.copy_library_permissions( trans, library_folder, ldda.library_dataset )
return security_agent.get_permissions( ldda )
def _apply_hda_permissions_to_ldda( self, trans, hda, ldda ):
"""
Copy actions/roles from hda to ldda.library_dataset (and then ldda) if ldda
doesn't already have roles for the given action.
"""
#PRECONDITION: permissions for this action on hda and ldda have been checked
# Make sure to apply any defined dataset permissions, allowing the permissions inherited from the
# library_dataset to over-ride the same permissions on the dataset, if they exist.
security_agent = trans.app.security_agent
dataset_permissions_dict = security_agent.get_permissions( hda.dataset )
library_dataset = ldda.library_dataset
library_dataset_actions = [ permission.action for permission in library_dataset.actions ]
# except that: if DATASET_MANAGE_PERMISSIONS exists in the hda.dataset permissions,
# we need to instead apply those roles to the LIBRARY_MANAGE permission to the library dataset
dataset_manage_permissions_action = security_agent.get_action( 'DATASET_MANAGE_PERMISSIONS' ).action
library_manage_permissions_action = security_agent.get_action( 'LIBRARY_MANAGE' ).action
#TODO: test this and remove if in loop below
#TODO: doesn't handle action.action
#if dataset_manage_permissions_action in dataset_permissions_dict:
# managing_roles = dataset_permissions_dict.pop( dataset_manage_permissions_action )
# dataset_permissions_dict[ library_manage_permissions_action ] = managing_roles
flush_needed = False
for action, dataset_permissions_roles in dataset_permissions_dict.items():
if isinstance( action, security.Action ):
action = action.action
# alter : DATASET_MANAGE_PERMISSIONS -> LIBRARY_MANAGE (see above)
if action == dataset_manage_permissions_action:
action = library_manage_permissions_action
#TODO: generalize to util.update_dict_without_overwrite
# add the hda actions & roles to the library_dataset
#NOTE: only apply an hda perm if it's NOT set in the library_dataset perms (don't overwrite)
if action not in library_dataset_actions:
for role in dataset_permissions_roles:
ldps = trans.model.LibraryDatasetPermissions( action, library_dataset, role )
ldps = [ ldps ] if not isinstance( ldps, list ) else ldps
for ldp in ldps:
trans.sa_session.add( ldp )
flush_needed = True
if flush_needed:
trans.sa_session.flush()
# finally, apply the new library_dataset to its associated ldda (must be the same)
security_agent.copy_library_permissions( trans, library_dataset, ldda )
return security_agent.get_permissions( ldda )
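    # Illustrative sketch of the re-keying done above (hypothetical roles; the action names
    # are the ones looked up via security_agent.get_action() in the method body):
    #
    #     hda.dataset permissions : { DATASET_MANAGE_PERMISSIONS: [ role_a ], <other action>: [ role_b ] }
    #     library_dataset actions : [ <other action> ]
    #     result                  : role_a is added to the library_dataset under LIBRARY_MANAGE;
    #                               role_b is skipped because <other action> is already set on the library_dataset.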
class UsesVisualizationMixin( UsesHistoryDatasetAssociationMixin, UsesLibraryMixinItems ):
"""
Mixin for controllers that use Visualization objects.
"""
viz_types = [ "trackster" ]
def get_visualization( self, trans, id, check_ownership=True, check_accessible=False ):
"""
Get a Visualization from the database by id, verifying ownership.
"""
        # Load visualization from database
try:
visualization = trans.sa_session.query( trans.model.Visualization ).get( trans.security.decode_id( id ) )
except TypeError:
visualization = None
if not visualization:
error( "Visualization not found" )
else:
return self.security_check( trans, visualization, check_ownership, check_accessible )
def get_visualizations_by_user( self, trans, user, order_by=None, query_only=False ):
"""
Return query or query results of visualizations filtered by a user.
Set `order_by` to a column or list of columns to change the order
returned. Defaults to `DEFAULT_ORDER_BY`.
Set `query_only` to return just the query for further filtering or
processing.
"""
#TODO: move into model (as class attr)
DEFAULT_ORDER_BY = [ model.Visualization.title ]
if not order_by:
order_by = DEFAULT_ORDER_BY
if not isinstance( order_by, list ):
order_by = [ order_by ]
query = trans.sa_session.query( model.Visualization )
query = query.filter( model.Visualization.user == user )
if order_by:
query = query.order_by( *order_by )
if query_only:
return query
return query.all()
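    # Illustrative usage of the query helper above (a hedged sketch; `trans` and `user` come
    # from the calling controller, and the extra filter is hypothetical):
    #
    #     # fetch a user's visualizations ordered by something other than the default title
    #     visualizations = self.get_visualizations_by_user( trans, user, order_by=model.Visualization.dbkey )
    #
    #     # or keep the query open for further filtering before executing it
    #     query = self.get_visualizations_by_user( trans, user, query_only=True )
    #     visualizations = query.filter( model.Visualization.deleted == False ).all()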
def get_visualizations_shared_with_user( self, trans, user, order_by=None, query_only=False ):
"""
Return query or query results for visualizations shared with the given user.
Set `order_by` to a column or list of columns to change the order
returned. Defaults to `DEFAULT_ORDER_BY`.
Set `query_only` to return just the query for further filtering or
processing.
"""
DEFAULT_ORDER_BY = [ model.Visualization.title ]
if not order_by:
order_by = DEFAULT_ORDER_BY
if not isinstance( order_by, list ):
order_by = [ order_by ]
query = trans.sa_session.query( model.Visualization ).join( model.VisualizationUserShareAssociation )
query = query.filter( model.VisualizationUserShareAssociation.user_id == user.id )
# remove duplicates when a user shares with themselves?
query = query.filter( model.Visualization.user_id != user.id )
if order_by:
query = query.order_by( *order_by )
if query_only:
return query
return query.all()
def get_published_visualizations( self, trans, exclude_user=None, order_by=None, query_only=False ):
"""
Return query or query results for published visualizations optionally excluding
the user in `exclude_user`.
Set `order_by` to a column or list of columns to change the order
returned. Defaults to `DEFAULT_ORDER_BY`.
Set `query_only` to return just the query for further filtering or
processing.
"""
DEFAULT_ORDER_BY = [ model.Visualization.title ]
if not order_by:
order_by = DEFAULT_ORDER_BY
if not isinstance( order_by, list ):
order_by = [ order_by ]
query = trans.sa_session.query( model.Visualization )
query = query.filter( model.Visualization.published == True )
if exclude_user:
query = query.filter( model.Visualization.user != exclude_user )
if order_by:
query = query.order_by( *order_by )
if query_only:
return query
return query.all()
#TODO: move into model (to_dict)
def get_visualization_summary_dict( self, visualization ):
"""
Return a set of summary attributes for a visualization in dictionary form.
        NOTE: encoding ids isn't done here; that should happen at the caller level.
"""
#TODO: deleted
#TODO: importable
return {
'id' : visualization.id,
'title' : visualization.title,
'type' : visualization.type,
'dbkey' : visualization.dbkey,
}
def get_visualization_dict( self, visualization ):
"""
Return a set of detailed attributes for a visualization in dictionary form.
The visualization's latest_revision is returned in its own sub-dictionary.
        NOTE: encoding ids isn't done here; that should happen at the caller level.
"""
return {
'model_class': 'Visualization',
'id' : visualization.id,
'title' : visualization.title,
'type' : visualization.type,
'user_id' : visualization.user.id,
'dbkey' : visualization.dbkey,
'slug' : visualization.slug,
# to_dict only the latest revision (allow older to be fetched elsewhere)
'latest_revision' : self.get_visualization_revision_dict( visualization.latest_revision ),
'revisions' : [ r.id for r in visualization.revisions ],
}
def get_visualization_revision_dict( self, revision ):
"""
        Return a set of detailed attributes for a visualization revision in dictionary form.
        NOTE: encoding ids isn't done here; that should happen at the caller level.
"""
return {
'model_class': 'VisualizationRevision',
'id' : revision.id,
'visualization_id' : revision.visualization.id,
'title' : revision.title,
'dbkey' : revision.dbkey,
'config' : revision.config,
}
def import_visualization( self, trans, id, user=None ):
"""
Copy the visualization with the given id and associate the copy
with the given user (defaults to trans.user).
        Raises `ItemAccessibilityException` if `user` is not passed and
        the current user is anonymous, or if the visualization is not `importable`.
Raises `ItemDeletionException` if the visualization has been deleted.
"""
# default to trans.user, error if anon
if not user:
if not trans.user:
raise ItemAccessibilityException( "You must be logged in to import Galaxy visualizations" )
user = trans.user
# check accessibility
visualization = self.get_visualization( trans, id, check_ownership=False )
if not visualization.importable:
raise ItemAccessibilityException( "The owner of this visualization has disabled imports via this link." )
if visualization.deleted:
raise ItemDeletionException( "You can't import this visualization because it has been deleted." )
# copy vis and alter title
#TODO: need to handle custom db keys.
imported_visualization = visualization.copy( user=user, title="imported: " + visualization.title )
trans.sa_session.add( imported_visualization )
trans.sa_session.flush()
return imported_visualization
    def create_visualization( self, trans, type, title="Untitled Visualization", slug=None,
                              dbkey=None, annotation=None, config=None, save=True ):
        """
        Create a visualization and its first revision.
        """
        # Avoid a shared mutable default argument for config.
        config = config or {}
        visualization = self._create_visualization( trans, title, type, dbkey, slug, annotation, save )
#TODO: handle this error structure better either in _create or here
if isinstance( visualization, dict ):
err_dict = visualization
raise ValueError( err_dict[ 'title_err' ] or err_dict[ 'slug_err' ] )
# Create and save first visualization revision
revision = trans.model.VisualizationRevision( visualization=visualization, title=title,
config=config, dbkey=dbkey )
visualization.latest_revision = revision
if save:
session = trans.sa_session
session.add( revision )
session.flush()
return visualization
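    # Hedged usage sketch for create_visualization (the title, dbkey and config values below are
    # hypothetical; a dict returned by _create_visualization on title/slug errors surfaces here
    # as a ValueError):
    #
    #     try:
    #         visualization = self.create_visualization( trans, type='trackster',
    #                                                    title='RNA-seq coverage', dbkey='hg19',
    #                                                    config={ 'view': {}, 'bookmarks': [] } )
    #     except ValueError as validation_error:
    #         # invalid or duplicate title/slug
    #         ...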
def add_visualization_revision( self, trans, visualization, config, title, dbkey ):
"""
        Adds a new `VisualizationRevision` to the given `visualization` with
        the given parameters and sets its parent visualization's `latest_revision`
        to the new revision.
"""
#precondition: only add new revision on owned vis's
#TODO:?? should we default title, dbkey, config? to which: visualization or latest_revision?
revision = trans.model.VisualizationRevision( visualization, title, dbkey, config )
visualization.latest_revision = revision
        #TODO:?? does this automatically add revision to visualization.revisions?
trans.sa_session.add( revision )
trans.sa_session.flush()
return revision
    def save_visualization( self, trans, config, type, id=None, title=None, dbkey=None, slug=None, annotation=None ):
        """
        Create or update a visualization, attach a new revision holding `config`,
        and return its encoded id and URL.
        """
        session = trans.sa_session
        # Create/get visualization.
if not id:
# Create new visualization.
vis = self._create_visualization( trans, title, type, dbkey, slug, annotation )
else:
decoded_id = trans.security.decode_id( id )
vis = session.query( trans.model.Visualization ).get( decoded_id )
#TODO: security check?
# Create new VisualizationRevision that will be attached to the viz
vis_rev = trans.model.VisualizationRevision()
vis_rev.visualization = vis
# do NOT alter the dbkey
vis_rev.dbkey = vis.dbkey
# do alter the title and config
vis_rev.title = title
# -- Validate config. --
if vis.type == 'trackster':
def unpack_track( track_dict ):
""" Unpack a track from its json. """
dataset_dict = track_dict[ 'dataset' ]
return {
"dataset_id": trans.security.decode_id( dataset_dict['id'] ),
"hda_ldda": dataset_dict.get('hda_ldda', 'hda'),
"track_type": track_dict['track_type'],
"prefs": track_dict['prefs'],
"mode": track_dict['mode'],
"filters": track_dict['filters'],
"tool_state": track_dict['tool_state']
}
def unpack_collection( collection_json ):
""" Unpack a collection from its json. """
unpacked_drawables = []
drawables = collection_json[ 'drawables' ]
for drawable_json in drawables:
if 'track_type' in drawable_json:
drawable = unpack_track( drawable_json )
else:
drawable = unpack_collection( drawable_json )
unpacked_drawables.append( drawable )
return {
"obj_type": collection_json[ 'obj_type' ],
"drawables": unpacked_drawables,
"prefs": collection_json.get( 'prefs' , [] ),
"filters": collection_json.get( 'filters', None )
}
# TODO: unpack and validate bookmarks:
def unpack_bookmarks( bookmarks_json ):
return bookmarks_json
# Unpack and validate view content.
view_content = unpack_collection( config[ 'view' ] )
bookmarks = unpack_bookmarks( config[ 'bookmarks' ] )
vis_rev.config = { "view": view_content, "bookmarks": bookmarks }
# Viewport from payload
if 'viewport' in config:
chrom = config['viewport']['chrom']
start = config['viewport']['start']
end = config['viewport']['end']
overview = config['viewport']['overview']
vis_rev.config[ "viewport" ] = { 'chrom': chrom, 'start': start, 'end': end, 'overview': overview }
else:
# Default action is to save the config as is with no validation.
vis_rev.config = config
vis.latest_revision = vis_rev
session.add( vis_rev )
session.flush()
encoded_id = trans.security.encode_id( vis.id )
return { "vis_id": encoded_id, "url": url_for( controller='visualization', action=vis.type, id=encoded_id ) }
def get_tool_def( self, trans, hda ):
""" Returns definition of an interactive tool for an HDA. """
job = self.get_hda_job( hda )
if not job:
return None
tool = trans.app.toolbox.get_tool( job.tool_id )
if not tool:
return None
# Tool must have a Trackster configuration.
if not tool.trackster_conf:
return None
# -- Get tool definition and add input values from job. --
tool_dict = tool.to_dict( trans, io_details=True )
tool_param_values = dict( [ ( p.name, p.value ) for p in job.parameters ] )
tool_param_values = tool.params_from_strings( tool_param_values, trans.app, ignore_errors=True )
# Only get values for simple inputs for now.
inputs_dict = [ i for i in tool_dict[ 'inputs' ] if i[ 'type' ] not in [ 'data', 'hidden_data', 'conditional' ] ]
for t_input in inputs_dict:
# Add value to tool.
if 'name' in t_input:
name = t_input[ 'name' ]
if name in tool_param_values:
value = tool_param_values[ name ]
if isinstance( value, Dictifiable ):
value = value.to_dict()
t_input[ 'value' ] = value
return tool_dict
def get_visualization_config( self, trans, visualization ):
""" Returns a visualization's configuration. Only works for trackster visualizations right now. """
config = None
if visualization.type in [ 'trackster', 'genome' ]:
# Unpack Trackster config.
latest_revision = visualization.latest_revision
bookmarks = latest_revision.config.get( 'bookmarks', [] )
def pack_track( track_dict ):
dataset_id = track_dict['dataset_id']
hda_ldda = track_dict.get('hda_ldda', 'hda')
if hda_ldda == 'ldda':
# HACK: need to encode library dataset ID because get_hda_or_ldda
# only works for encoded datasets.
dataset_id = trans.security.encode_id( dataset_id )
dataset = self.get_hda_or_ldda( trans, hda_ldda, dataset_id )
try:
prefs = track_dict['prefs']
except KeyError:
prefs = {}
track_data_provider = trans.app.data_provider_registry.get_data_provider( trans,
original_dataset=dataset,
source='data' )
return {
"track_type": dataset.datatype.track_type,
"dataset": trans.security.encode_dict_ids( dataset.to_dict() ),
"prefs": prefs,
"mode": track_dict.get( 'mode', 'Auto' ),
"filters": track_dict.get( 'filters', { 'filters' : track_data_provider.get_filters() } ),
"tool": self.get_tool_def( trans, dataset ),
"tool_state": track_dict.get( 'tool_state', {} )
}
def pack_collection( collection_dict ):
drawables = []
for drawable_dict in collection_dict[ 'drawables' ]:
if 'track_type' in drawable_dict:
drawables.append( pack_track( drawable_dict ) )
else:
drawables.append( pack_collection( drawable_dict ) )
return {
'obj_type': collection_dict[ 'obj_type' ],
'drawables': drawables,
'prefs': collection_dict.get( 'prefs', [] ),
'filters': collection_dict.get( 'filters', {} )
}
def encode_dbkey( dbkey ):
"""
Encodes dbkey as needed. For now, prepends user's public name
to custom dbkey keys.
"""
encoded_dbkey = dbkey
user = visualization.user
if 'dbkeys' in user.preferences and dbkey in user.preferences[ 'dbkeys' ]:
encoded_dbkey = "%s:%s" % ( user.username, dbkey )
return encoded_dbkey
# Set tracks.
tracks = []
if 'tracks' in latest_revision.config:
# Legacy code.
for track_dict in visualization.latest_revision.config[ 'tracks' ]:
tracks.append( pack_track( track_dict ) )
elif 'view' in latest_revision.config:
for drawable_dict in visualization.latest_revision.config[ 'view' ][ 'drawables' ]:
if 'track_type' in drawable_dict:
tracks.append( pack_track( drawable_dict ) )
else:
tracks.append( pack_collection( drawable_dict ) )
config = { "title": visualization.title,
"vis_id": trans.security.encode_id( visualization.id ),
"tracks": tracks,
"bookmarks": bookmarks,
"chrom": "",
"dbkey": encode_dbkey( visualization.dbkey ) }
if 'viewport' in latest_revision.config:
config['viewport'] = latest_revision.config['viewport']
else:
# Default action is to return config unaltered.
latest_revision = visualization.latest_revision
config = latest_revision.config
return config
def get_new_track_config( self, trans, dataset ):
"""
Returns track configuration dict for a dataset.
"""
# Get data provider.
track_data_provider = trans.app.data_provider_registry.get_data_provider( trans, original_dataset=dataset )
if isinstance( dataset, trans.app.model.HistoryDatasetAssociation ):
hda_ldda = "hda"
elif isinstance( dataset, trans.app.model.LibraryDatasetDatasetAssociation ):
hda_ldda = "ldda"
# Get track definition.
return {
"track_type": dataset.datatype.track_type,
"name": dataset.name,
"dataset": trans.security.encode_dict_ids( dataset.to_dict() ),
"prefs": {},
"filters": { 'filters' : track_data_provider.get_filters() },
"tool": self.get_tool_def( trans, dataset ),
"tool_state": {}
}
def get_hda_or_ldda( self, trans, hda_ldda, dataset_id ):
""" Returns either HDA or LDDA for hda/ldda and id combination. """
if hda_ldda == "hda":
return self.get_dataset( trans, dataset_id, check_ownership=False, check_accessible=True )
else:
return self.get_library_dataset_dataset_association( trans, dataset_id )
# -- Helper functions --
def _create_visualization( self, trans, title, type, dbkey=None, slug=None, annotation=None, save=True ):
""" Create visualization but not first revision. Returns Visualization object. """
user = trans.get_user()
# Error checking.
title_err = slug_err = ""
if not title:
title_err = "visualization name is required"
elif slug and not _is_valid_slug( slug ):
slug_err = "visualization identifier must consist of only lowercase letters, numbers, and the '-' character"
elif slug and trans.sa_session.query( trans.model.Visualization ).filter_by( user=user, slug=slug, deleted=False ).first():
slug_err = "visualization identifier must be unique"
if title_err or slug_err:
return { 'title_err': title_err, 'slug_err': slug_err }
# Create visualization
visualization = trans.model.Visualization( user=user, title=title, dbkey=dbkey, type=type )
if slug:
visualization.slug = slug
else:
self.create_item_slug( trans.sa_session, visualization )
if annotation:
annotation = sanitize_html( annotation, 'utf-8', 'text/html' )
            #TODO: if this is to stay in the mixin, UsesAnnotations should be added to the superclasses
            # right now this depends on the classes that include this mixin also inheriting UsesAnnotations
self.add_item_annotation( trans.sa_session, trans.user, visualization, annotation )
if save:
session = trans.sa_session
session.add( visualization )
session.flush()
return visualization
def _get_genome_data( self, trans, dataset, dbkey=None ):
"""
Returns genome-wide data for dataset if available; if not, message is returned.
"""
rval = None
# Get data sources.
data_sources = dataset.get_datasources( trans )
query_dbkey = dataset.dbkey
if query_dbkey == "?":
query_dbkey = dbkey
chroms_info = self.app.genomes.chroms( trans, dbkey=query_dbkey )
# If there are no messages (messages indicate data is not ready/available), get data.
messages_list = [ data_source_dict[ 'message' ] for data_source_dict in data_sources.values() ]
message = self._get_highest_priority_msg( messages_list )
if message:
rval = message
else:
# HACK: chromatin interactions tracks use data as source.
source = 'index'
if isinstance( dataset.datatype, ChromatinInteractions ):
source = 'data'
data_provider = trans.app.data_provider_registry.get_data_provider( trans,
original_dataset=dataset,
source=source )
# HACK: pass in additional params which are used for only some
# types of data providers; level, cutoffs used for summary tree,
# num_samples for BBI, and interchromosomal used for chromatin interactions.
rval = data_provider.get_genome_data( chroms_info,
level=4, detail_cutoff=0, draw_cutoff=0,
num_samples=150,
interchromosomal=True )
return rval
# FIXME: this method probably belongs down in the model.Dataset class.
def _get_highest_priority_msg( self, message_list ):
"""
Returns highest priority message from a list of messages.
"""
return_message = None
# For now, priority is: job error (dict), no converter, pending.
for message in message_list:
if message is not None:
if isinstance(message, dict):
return_message = message
break
elif message == "no converter":
return_message = message
                elif return_message is None and message == "pending":
return_message = message
return return_message
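    # Illustrative behaviour of _get_highest_priority_msg (hypothetical message lists; the
    # dict content stands in for a job-error message):
    #
    #     self._get_highest_priority_msg( [ None, 'pending', 'no converter' ] )   # -> 'no converter'
    #     self._get_highest_priority_msg( [ 'pending', { 'kind': 'error' } ] )    # -> the error dict
    #     self._get_highest_priority_msg( [ None, None ] )                        # -> None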
class UsesStoredWorkflowMixin( SharableItemSecurityMixin, UsesAnnotations ):
""" Mixin for controllers that use StoredWorkflow objects. """
def get_stored_workflow( self, trans, id, check_ownership=True, check_accessible=False ):
""" Get a StoredWorkflow from the database by id, verifying ownership. """
# Load workflow from database
try:
workflow = trans.sa_session.query( trans.model.StoredWorkflow ).get( trans.security.decode_id( id ) )
except TypeError:
workflow = None
if not workflow:
error( "Workflow not found" )
else:
self.security_check( trans, workflow, check_ownership, check_accessible )
# Older workflows may be missing slugs, so set them here.
if not workflow.slug:
self.create_item_slug( trans.sa_session, workflow )
trans.sa_session.flush()
return workflow
def get_stored_workflow_steps( self, trans, stored_workflow ):
""" Restores states for a stored workflow's steps. """
module_injector = WorkflowModuleInjector( trans )
for step in stored_workflow.latest_workflow.steps:
try:
module_injector.inject( step )
except MissingToolException:
# Now upgrade_messages is a string instead of a dict, why?
step.upgrade_messages = "Unknown Tool ID"
def _import_shared_workflow( self, trans, stored):
""" """
# Copy workflow.
imported_stored = model.StoredWorkflow()
imported_stored.name = "imported: " + stored.name
imported_stored.latest_workflow = stored.latest_workflow
imported_stored.user = trans.user
# Save new workflow.
session = trans.sa_session
session.add( imported_stored )
session.flush()
# Copy annotations.
self.copy_item_annotation( session, stored.user, stored, imported_stored.user, imported_stored )
for order_index, step in enumerate( stored.latest_workflow.steps ):
self.copy_item_annotation( session, stored.user, step, \
imported_stored.user, imported_stored.latest_workflow.steps[order_index] )
session.flush()
return imported_stored
def _workflow_from_dict( self, trans, data, source=None, add_to_menu=False, publish=False ):
"""
Creates a workflow from a dict. Created workflow is stored in the database and returned.
"""
# Put parameters in workflow mode
trans.workflow_building_mode = True
# Create new workflow from incoming dict
workflow = model.Workflow()
# If there's a source, put it in the workflow name.
if source:
name = "%s (imported from %s)" % ( data['name'], source )
else:
name = data['name']
workflow.name = name
# Assume no errors until we find a step that has some
workflow.has_errors = False
# Create each step
steps = []
# The editor will provide ids for each step that we don't need to save,
# but do need to use to make connections
steps_by_external_id = {}
# Keep track of tools required by the workflow that are not available in
# the local Galaxy instance. Each tuple in the list of missing_tool_tups
# will be ( tool_id, tool_name, tool_version ).
missing_tool_tups = []
supplied_steps = data[ 'steps' ]
# Try to iterate through imported workflow in such a way as to
# preserve step order.
step_indices = supplied_steps.keys()
try:
step_indices = sorted( step_indices, key=int )
except ValueError:
            # too defensive: were these ever, or will they ever, not be integers?
pass
# First pass to build step objects and populate basic values
for step_index in step_indices:
step_dict = supplied_steps[ step_index ]
# Create the model class for the step
step = model.WorkflowStep()
steps.append( step )
steps_by_external_id[ step_dict['id' ] ] = step
# FIXME: Position should be handled inside module
step.position = step_dict['position']
module = module_factory.from_dict( trans, step_dict, secure=False )
module.save_to_step( step )
if module.type == 'tool' and module.tool is None:
# A required tool is not available in the local Galaxy instance.
missing_tool_tup = ( step_dict[ 'tool_id' ], step_dict[ 'name' ], step_dict[ 'tool_version' ] )
if missing_tool_tup not in missing_tool_tups:
missing_tool_tups.append( missing_tool_tup )
                # Save the entire step_dict in the unused config field, to be parsed later
                # when we do have the tool
step.config = dumps(step_dict)
if step.tool_errors:
workflow.has_errors = True
# Stick this in the step temporarily
step.temp_input_connections = step_dict['input_connections']
# Save step annotation.
annotation = step_dict[ 'annotation' ]
if annotation:
annotation = sanitize_html( annotation, 'utf-8', 'text/html' )
self.add_item_annotation( trans.sa_session, trans.get_user(), step, annotation )
# Second pass to deal with connections between steps
for step in steps:
# Input connections
for input_name, conn_list in step.temp_input_connections.iteritems():
if not conn_list:
continue
if not isinstance(conn_list, list): # Older style singleton connection
conn_list = [conn_list]
for conn_dict in conn_list:
conn = model.WorkflowStepConnection()
conn.input_step = step
conn.input_name = input_name
conn.output_name = conn_dict['output_name']
conn.output_step = steps_by_external_id[ conn_dict['id'] ]
del step.temp_input_connections
# Order the steps if possible
attach_ordered_steps( workflow, steps )
# Connect up
stored = model.StoredWorkflow()
stored.name = workflow.name
workflow.stored_workflow = stored
stored.latest_workflow = workflow
stored.user = trans.user
stored.published = publish
if data[ 'annotation' ]:
self.add_item_annotation( trans.sa_session, stored.user, stored, data[ 'annotation' ] )
# Persist
trans.sa_session.add( stored )
trans.sa_session.flush()
if add_to_menu:
            if trans.user.stored_workflow_menu_entries is None:
trans.user.stored_workflow_menu_entries = []
menuEntry = model.StoredWorkflowMenuEntry()
menuEntry.stored_workflow = stored
trans.user.stored_workflow_menu_entries.append( menuEntry )
trans.sa_session.flush()
return stored, missing_tool_tups
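    # Hedged sketch of the `data` dict expected by _workflow_from_dict above (it mirrors the
    # dict produced by _workflow_to_dict below; ids, names, tool ids and positions are hypothetical):
    #
    #     { 'name': 'My workflow',
    #       'annotation': '',
    #       'steps': {
    #           '0': { 'id': 0, 'type': 'data_input', 'name': 'Input dataset',
    #                  'tool_id': None, 'tool_version': None,
    #                  'position': { 'left': 10, 'top': 10 },
    #                  'input_connections': {}, 'annotation': '' },
    #           '1': { 'id': 1, 'type': 'tool', 'name': 'Filter', 'tool_id': 'Filter1',
    #                  'tool_version': '1.1.0',
    #                  'position': { 'left': 300, 'top': 10 },
    #                  'input_connections': { 'input': { 'id': 0, 'output_name': 'output' } },
    #                  'annotation': '' } } }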
def _workflow_to_dict( self, trans, stored ):
"""
Converts a workflow to a dict of attributes suitable for exporting.
"""
workflow = stored.latest_workflow
workflow_annotation = self.get_item_annotation_obj( trans.sa_session, trans.user, stored )
annotation_str = ""
if workflow_annotation:
annotation_str = workflow_annotation.annotation
# Pack workflow data into a dictionary and return
data = {}
data['a_galaxy_workflow'] = 'true' # Placeholder for identifying galaxy workflow
data['format-version'] = "0.1"
data['name'] = workflow.name
data['annotation'] = annotation_str
if workflow.uuid is not None:
data['uuid'] = str(workflow.uuid)
data['steps'] = {}
# For each step, rebuild the form and encode the state
for step in workflow.steps:
# Load from database representation
module = module_factory.from_workflow_step( trans, step )
if not module:
return None
# Get user annotation.
step_annotation = self.get_item_annotation_obj(trans.sa_session, trans.user, step )
annotation_str = ""
if step_annotation:
annotation_str = step_annotation.annotation
# Step info
step_dict = {
'id': step.order_index,
'type': module.type,
'tool_id': module.get_tool_id(),
'tool_version' : step.tool_version,
'name': module.get_name(),
'tool_state': module.get_state( secure=False ),
'tool_errors': module.get_errors(),
## 'data_inputs': module.get_data_inputs(),
## 'data_outputs': module.get_data_outputs(),
'annotation' : annotation_str
}
# Add post-job actions to step dict.
if module.type == 'tool':
pja_dict = {}
for pja in step.post_job_actions:
pja_dict[pja.action_type+pja.output_name] = dict( action_type = pja.action_type,
output_name = pja.output_name,
action_arguments = pja.action_arguments )
step_dict[ 'post_job_actions' ] = pja_dict
# Data inputs
step_dict['inputs'] = module.get_runtime_input_dicts( annotation_str )
# User outputs
step_dict['user_outputs'] = []
"""
module_outputs = module.get_data_outputs()
step_outputs = trans.sa_session.query( WorkflowOutput ).filter( step=step )
for output in step_outputs:
name = output.output_name
annotation = ""
for module_output in module_outputs:
if module_output.get( 'name', None ) == name:
output_type = module_output.get( 'extension', '' )
break
data['outputs'][name] = { 'name' : name, 'annotation' : annotation, 'type' : output_type }
"""
# All step outputs
step_dict['outputs'] = []
if type( module ) is ToolModule:
for output in module.get_data_outputs():
step_dict['outputs'].append( { 'name' : output['name'], 'type' : output['extensions'][0] } )
# Connections
input_connections = step.input_connections
if step.type is None or step.type == 'tool':
# Determine full (prefixed) names of valid input datasets
data_input_names = {}
def callback( input, value, prefixed_name, prefixed_label ):
if isinstance( input, DataToolParameter ) or isinstance( input, DataCollectionToolParameter ):
data_input_names[ prefixed_name ] = True
# FIXME: this updates modules silently right now; messages from updates should be provided.
module.check_and_update_state()
visit_input_values( module.tool.inputs, module.state.inputs, callback )
# Filter
# FIXME: this removes connection without displaying a message currently!
input_connections = [ conn for conn in input_connections if conn.input_name in data_input_names ]
# Encode input connections as dictionary
input_conn_dict = {}
unique_input_names = set( [conn.input_name for conn in input_connections] )
for input_name in unique_input_names:
input_conn_dict[ input_name ] = \
[ dict( id=conn.output_step.order_index, output_name=conn.output_name ) for conn in input_connections if conn.input_name == input_name ]
            # Preserve backward compatibility. Previously Galaxy
            # assumed input connections would be dictionaries, not
            # lists of dictionaries, so replace any singleton list
            # with just the dictionary so that workflows exported from
            # newer Galaxy instances can be used with older Galaxy
            # instances if they do not include multiple-input
            # tools. This should be removed at some point. The mirrored
            # hack in _workflow_from_dict should never be removed so
            # existing workflow exports continue to function.
for input_name, input_conn in dict(input_conn_dict).iteritems():
if len(input_conn) == 1:
input_conn_dict[input_name] = input_conn[0]
step_dict['input_connections'] = input_conn_dict
# Position
step_dict['position'] = step.position
# Add to return value
data['steps'][step.order_index] = step_dict
return data
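    # Illustrative input_connections encoding produced above (hypothetical step ids and
    # output names):
    #
    #     # multiple connections keep the list form
    #     { 'input1': [ { 'id': 0, 'output_name': 'out_file1' },
    #                   { 'id': 2, 'output_name': 'out_file1' } ] }
    #
    #     # a single connection is collapsed to a bare dictionary for backward compatibility
    #     { 'input1': { 'id': 0, 'output_name': 'out_file1' } }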
class UsesFormDefinitionsMixin:
"""Mixin for controllers that use Galaxy form objects."""
def get_all_forms( self, trans, all_versions=False, filter=None, form_type='All' ):
"""
        Return all versions of all forms from the form_definition table if
        all_versions is set to True. Otherwise return only the latest version
        of each form from the form_definition_current table.
"""
if all_versions:
return trans.sa_session.query( trans.app.model.FormDefinition )
if filter:
fdc_list = trans.sa_session.query( trans.app.model.FormDefinitionCurrent ).filter_by( **filter )
else:
fdc_list = trans.sa_session.query( trans.app.model.FormDefinitionCurrent )
if form_type == 'All':
return [ fdc.latest_form for fdc in fdc_list ]
else:
return [ fdc.latest_form for fdc in fdc_list if fdc.latest_form.type == form_type ]
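    # Hedged usage sketch (it mirrors the call made by get_all_forms_by_type just below):
    #
    #     # latest, non-deleted library-information templates only
    #     forms = self.get_all_forms( trans,
    #                                 filter=dict( deleted=False ),
    #                                 form_type=trans.model.FormDefinition.types.LIBRARY_INFO_TEMPLATE )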
def get_all_forms_by_type( self, trans, cntrller, form_type ):
forms = self.get_all_forms( trans,
filter=dict( deleted=False ),
form_type=form_type )
if not forms:
message = "There are no forms on which to base the template, so create a form and then add the template."
return trans.response.send_redirect( web.url_for( controller='forms',
action='create_form_definition',
cntrller=cntrller,
message=message,
status='done',
form_type=form_type ) )
return forms
@web.expose
def add_template( self, trans, cntrller, item_type, form_type, **kwd ):
params = util.Params( kwd )
form_id = params.get( 'form_id', 'none' )
message = util.restore_text( params.get( 'message', '' ) )
action = ''
status = params.get( 'status', 'done' )
forms = self.get_all_forms_by_type( trans, cntrller, form_type )
# form_type must be one of: RUN_DETAILS_TEMPLATE, LIBRARY_INFO_TEMPLATE
in_library = form_type == trans.model.FormDefinition.types.LIBRARY_INFO_TEMPLATE
in_sample_tracking = form_type == trans.model.FormDefinition.types.RUN_DETAILS_TEMPLATE
if in_library:
show_deleted = util.string_as_bool( params.get( 'show_deleted', False ) )
use_panels = util.string_as_bool( params.get( 'use_panels', False ) )
library_id = params.get( 'library_id', None )
folder_id = params.get( 'folder_id', None )
ldda_id = params.get( 'ldda_id', None )
is_admin = trans.user_is_admin() and cntrller in [ 'library_admin', 'requests_admin' ]
current_user_roles = trans.get_current_user_roles()
elif in_sample_tracking:
request_type_id = params.get( 'request_type_id', None )
sample_id = params.get( 'sample_id', None )
try:
if in_sample_tracking:
item, item_desc, action, id = self.get_item_and_stuff( trans,
item_type=item_type,
request_type_id=request_type_id,
sample_id=sample_id )
elif in_library:
item, item_desc, action, id = self.get_item_and_stuff( trans,
item_type=item_type,
library_id=library_id,
folder_id=folder_id,
ldda_id=ldda_id,
is_admin=is_admin )
if not item:
message = "Invalid %s id ( %s ) specified." % ( item_desc, str( id ) )
if in_sample_tracking:
return trans.response.send_redirect( web.url_for( controller='request_type',
action='browse_request_types',
id=request_type_id,
message=util.sanitize_text( message ),
status='error' ) )
if in_library:
return trans.response.send_redirect( web.url_for( controller='library_common',
action='browse_library',
cntrller=cntrller,
id=library_id,
show_deleted=show_deleted,
message=util.sanitize_text( message ),
status='error' ) )
except ValueError:
# At this point, the client has already redirected, so this is just here to prevent the unnecessary traceback
return None
if in_library:
# Make sure the user is authorized to do what they are trying to do.
authorized = True
if not ( is_admin or trans.app.security_agent.can_modify_library_item( current_user_roles, item ) ):
authorized = False
unauthorized = 'modify'
if not ( is_admin or trans.app.security_agent.can_access_library_item( current_user_roles, item, trans.user ) ):
authorized = False
unauthorized = 'access'
if not authorized:
message = "You are not authorized to %s %s '%s'." % ( unauthorized, item_desc, item.name )
return trans.response.send_redirect( web.url_for( controller='library_common',
action='browse_library',
cntrller=cntrller,
id=library_id,
show_deleted=show_deleted,
message=util.sanitize_text( message ),
status='error' ) )
# If the inheritable checkbox is checked, the param will be in the request
inheritable = CheckboxField.is_checked( params.get( 'inheritable', '' ) )
if params.get( 'add_template_button', False ):
if form_id not in [ None, 'None', 'none' ]:
form = trans.sa_session.query( trans.app.model.FormDefinition ).get( trans.security.decode_id( form_id ) )
form_values = trans.app.model.FormValues( form, {} )
trans.sa_session.add( form_values )
trans.sa_session.flush()
if item_type == 'library':
assoc = trans.model.LibraryInfoAssociation( item, form, form_values, inheritable=inheritable )
elif item_type == 'folder':
assoc = trans.model.LibraryFolderInfoAssociation( item, form, form_values, inheritable=inheritable )
elif item_type == 'ldda':
assoc = trans.model.LibraryDatasetDatasetInfoAssociation( item, form, form_values )
elif item_type in [ 'request_type', 'sample' ]:
run = trans.model.Run( form, form_values )
trans.sa_session.add( run )
trans.sa_session.flush()
if item_type == 'request_type':
# Delete current RequestTypeRunAssociation, if one exists.
rtra = item.run_details
if rtra:
trans.sa_session.delete( rtra )
trans.sa_session.flush()
                        # Add the new RequestTypeRunAssociation.  Templates associated with a RequestType
                        # are automatically inherited by its samples.
assoc = trans.model.RequestTypeRunAssociation( item, run )
elif item_type == 'sample':
assoc = trans.model.SampleRunAssociation( item, run )
trans.sa_session.add( assoc )
trans.sa_session.flush()
message = 'A template based on the form "%s" has been added to this %s.' % ( form.name, item_desc )
new_kwd = dict( action=action,
cntrller=cntrller,
message=util.sanitize_text( message ),
status='done' )
if in_sample_tracking:
new_kwd.update( dict( controller='request_type',
request_type_id=request_type_id,
sample_id=sample_id,
id=id ) )
return trans.response.send_redirect( web.url_for( **new_kwd ) )
elif in_library:
new_kwd.update( dict( controller='library_common',
use_panels=use_panels,
library_id=library_id,
folder_id=folder_id,
id=id,
show_deleted=show_deleted ) )
return trans.response.send_redirect( web.url_for( **new_kwd ) )
else:
message = "Select a form on which to base the template."
status = "error"
form_id_select_field = self.build_form_id_select_field( trans, forms, selected_value=kwd.get( 'form_id', 'none' ) )
try:
decoded_form_id = trans.security.decode_id( form_id )
except:
decoded_form_id = None
if decoded_form_id:
for form in forms:
if decoded_form_id == form.id:
widgets = form.get_widgets( trans.user )
break
else:
widgets = []
new_kwd = dict( cntrller=cntrller,
item_name=item.name,
item_desc=item_desc,
item_type=item_type,
form_type=form_type,
widgets=widgets,
form_id_select_field=form_id_select_field,
message=message,
status=status )
if in_sample_tracking:
new_kwd.update( dict( request_type_id=request_type_id,
sample_id=sample_id ) )
elif in_library:
new_kwd.update( dict( use_panels=use_panels,
library_id=library_id,
folder_id=folder_id,
ldda_id=ldda_id,
inheritable_checked=inheritable,
show_deleted=show_deleted ) )
return trans.fill_template( '/common/select_template.mako',
**new_kwd )
@web.expose
def edit_template( self, trans, cntrller, item_type, form_type, **kwd ):
# Edit the template itself, keeping existing field contents, if any.
params = util.Params( kwd )
message = util.restore_text( params.get( 'message', '' ) )
edited = util.string_as_bool( params.get( 'edited', False ) )
action = ''
# form_type must be one of: RUN_DETAILS_TEMPLATE, LIBRARY_INFO_TEMPLATE
in_library = form_type == trans.model.FormDefinition.types.LIBRARY_INFO_TEMPLATE
in_sample_tracking = form_type == trans.model.FormDefinition.types.RUN_DETAILS_TEMPLATE
if in_library:
show_deleted = util.string_as_bool( params.get( 'show_deleted', False ) )
use_panels = util.string_as_bool( params.get( 'use_panels', False ) )
library_id = params.get( 'library_id', None )
folder_id = params.get( 'folder_id', None )
ldda_id = params.get( 'ldda_id', None )
is_admin = trans.user_is_admin() and cntrller in [ 'library_admin', 'requests_admin' ]
current_user_roles = trans.get_current_user_roles()
elif in_sample_tracking:
request_type_id = params.get( 'request_type_id', None )
sample_id = params.get( 'sample_id', None )
try:
if in_library:
item, item_desc, action, id = self.get_item_and_stuff( trans,
item_type=item_type,
library_id=library_id,
folder_id=folder_id,
ldda_id=ldda_id,
is_admin=is_admin )
elif in_sample_tracking:
item, item_desc, action, id = self.get_item_and_stuff( trans,
item_type=item_type,
request_type_id=request_type_id,
sample_id=sample_id )
except ValueError:
return None
if in_library:
if not ( is_admin or trans.app.security_agent.can_modify_library_item( current_user_roles, item ) ):
message = "You are not authorized to modify %s '%s'." % ( item_desc, item.name )
return trans.response.send_redirect( web.url_for( controller='library_common',
action='browse_library',
cntrller=cntrller,
id=library_id,
show_deleted=show_deleted,
message=util.sanitize_text( message ),
status='error' ) )
# An info_association must exist at this point
if in_library:
info_association, inherited = item.get_info_association( restrict=True )
elif in_sample_tracking:
# Here run_details is a RequestTypeRunAssociation
rtra = item.run_details
info_association = rtra.run
template = info_association.template
if edited:
# The form on which the template is based has been edited, so we need to update the
# info_association with the current form
fdc = trans.sa_session.query( trans.app.model.FormDefinitionCurrent ).get( template.form_definition_current_id )
info_association.template = fdc.latest_form
trans.sa_session.add( info_association )
trans.sa_session.flush()
message = "The template for this %s has been updated with your changes." % item_desc
new_kwd = dict( action=action,
cntrller=cntrller,
id=id,
message=util.sanitize_text( message ),
status='done' )
if in_library:
new_kwd.update( dict( controller='library_common',
use_panels=use_panels,
library_id=library_id,
folder_id=folder_id,
show_deleted=show_deleted ) )
return trans.response.send_redirect( web.url_for( **new_kwd ) )
elif in_sample_tracking:
new_kwd.update( dict( controller='request_type',
request_type_id=request_type_id,
sample_id=sample_id ) )
return trans.response.send_redirect( web.url_for( **new_kwd ) )
# "template" is a FormDefinition, so since we're changing it, we need to use the latest version of it.
vars = dict( id=trans.security.encode_id( template.form_definition_current_id ),
response_redirect=web.url_for( controller='request_type',
action='edit_template',
cntrller=cntrller,
item_type=item_type,
form_type=form_type,
edited=True,
**kwd ) )
return trans.response.send_redirect( web.url_for( controller='forms', action='edit_form_definition', **vars ) )
@web.expose
def edit_template_info( self, trans, cntrller, item_type, form_type, **kwd ):
# Edit the contents of the template fields without altering the template itself.
params = util.Params( kwd )
# form_type must be one of: RUN_DETAILS_TEMPLATE, LIBRARY_INFO_TEMPLATE
in_library = form_type == trans.model.FormDefinition.types.LIBRARY_INFO_TEMPLATE
in_sample_tracking = form_type == trans.model.FormDefinition.types.RUN_DETAILS_TEMPLATE
if in_library:
library_id = params.get( 'library_id', None )
folder_id = params.get( 'folder_id', None )
ldda_id = params.get( 'ldda_id', None )
show_deleted = util.string_as_bool( params.get( 'show_deleted', False ) )
use_panels = util.string_as_bool( params.get( 'use_panels', False ) )
is_admin = ( trans.user_is_admin() and cntrller == 'library_admin' )
current_user_roles = trans.get_current_user_roles()
elif in_sample_tracking:
request_type_id = params.get( 'request_type_id', None )
sample_id = params.get( 'sample_id', None )
sample = trans.sa_session.query( trans.model.Sample ).get( trans.security.decode_id( sample_id ) )
message = util.restore_text( params.get( 'message', '' ) )
try:
if in_library:
item, item_desc, action, id = self.get_item_and_stuff( trans,
item_type=item_type,
library_id=library_id,
folder_id=folder_id,
ldda_id=ldda_id,
is_admin=is_admin )
elif in_sample_tracking:
item, item_desc, action, id = self.get_item_and_stuff( trans,
item_type=item_type,
request_type_id=request_type_id,
sample_id=sample_id )
except ValueError:
            if cntrller == 'api':
                trans.response.status = 400
            return None
if in_library:
if not ( is_admin or trans.app.security_agent.can_modify_library_item( current_user_roles, item ) ):
message = "You are not authorized to modify %s '%s'." % ( item_desc, item.name )
if cntrller == 'api':
trans.response.status = 400
return message
return trans.response.send_redirect( web.url_for( controller='library_common',
action='browse_library',
cntrller=cntrller,
id=library_id,
show_deleted=show_deleted,
message=util.sanitize_text( message ),
status='error' ) )
# We need the type of each template field widget
widgets = item.get_template_widgets( trans )
# The list of widgets may include an AddressField which we need to save if it is new
for index, widget_dict in enumerate( widgets ):
widget = widget_dict[ 'widget' ]
if isinstance( widget, AddressField ):
value = util.restore_text( params.get( widget.name, '' ) )
if value == 'new':
if params.get( 'edit_info_button', False ):
if self.field_param_values_ok( widget.name, 'AddressField', **kwd ):
# Save the new address
address = trans.app.model.UserAddress( user=trans.user )
self.save_widget_field( trans, address, widget.name, **kwd )
widget.value = str( address.id )
else:
message = 'Required fields are missing contents.'
if cntrller == 'api':
trans.response.status = 400
return message
new_kwd = dict( action=action,
id=id,
message=util.sanitize_text( message ),
status='error' )
if in_library:
new_kwd.update( dict( controller='library_common',
cntrller=cntrller,
use_panels=use_panels,
library_id=library_id,
folder_id=folder_id,
show_deleted=show_deleted ) )
return trans.response.send_redirect( web.url_for( **new_kwd ) )
if in_sample_tracking:
new_kwd.update( dict( controller='request_type',
request_type_id=request_type_id,
sample_id=sample_id ) )
return trans.response.send_redirect( web.url_for( **new_kwd ) )
else:
# Form was submitted via refresh_on_change
widget.value = 'new'
elif value == unicode( 'none' ):
widget.value = ''
else:
widget.value = value
elif isinstance( widget, CheckboxField ):
# We need to check the value from kwd since util.Params would have munged the list if
# the checkbox is checked.
value = kwd.get( widget.name, '' )
if CheckboxField.is_checked( value ):
widget.value = 'true'
else:
widget.value = util.restore_text( params.get( widget.name, '' ) )
# Save updated template field contents
field_contents = self.clean_field_contents( widgets, **kwd )
if field_contents:
if in_library:
                # In a library, since information templates are inherited, the template fields can be displayed
# on the information page for a folder or ldda when it has no info_association object. If the user
# has added field contents on an inherited template via a parent's info_association, we'll need to
# create a new form_values and info_association for the current object. The value for the returned
# inherited variable is not applicable at this level.
info_association, inherited = item.get_info_association( restrict=True )
elif in_sample_tracking:
assoc = item.run_details
if item_type == 'request_type' and assoc:
                    # If we're dealing with a RequestType, assoc will be a RequestTypeRunAssociation.
info_association = assoc.run
elif item_type == 'sample' and assoc:
# If we're dealing with a Sample, assoc will be a SampleRunAssociation if the
# Sample has one. If the Sample does not have a SampleRunAssociation, assoc will
# be the Sample's RequestType RequestTypeRunAssociation, in which case we need to
# create a SampleRunAssociation using the inherited template from the RequestType.
if isinstance( assoc, trans.model.RequestTypeRunAssociation ):
form_definition = assoc.run.template
new_form_values = trans.model.FormValues( form_definition, {} )
trans.sa_session.add( new_form_values )
trans.sa_session.flush()
new_run = trans.model.Run( form_definition, new_form_values )
trans.sa_session.add( new_run )
trans.sa_session.flush()
sra = trans.model.SampleRunAssociation( item, new_run )
trans.sa_session.add( sra )
trans.sa_session.flush()
info_association = sra.run
else:
info_association = assoc.run
else:
info_association = None
if info_association:
template = info_association.template
info = info_association.info
form_values = trans.sa_session.query( trans.app.model.FormValues ).get( info.id )
# Update existing content only if it has changed
flush_required = False
for field_contents_key, field_contents_value in field_contents.items():
if field_contents_key in form_values.content:
if form_values.content[ field_contents_key ] != field_contents_value:
flush_required = True
form_values.content[ field_contents_key ] = field_contents_value
else:
flush_required = True
form_values.content[ field_contents_key ] = field_contents_value
if flush_required:
trans.sa_session.add( form_values )
trans.sa_session.flush()
else:
if in_library:
# Inherit the next available info_association so we can get the template
info_association, inherited = item.get_info_association()
template = info_association.template
# Create a new FormValues object
form_values = trans.app.model.FormValues( template, field_contents )
trans.sa_session.add( form_values )
trans.sa_session.flush()
# Create a new info_association between the current library item and form_values
if item_type == 'folder':
# A LibraryFolder is a special case because if it inherited the template from its parent,
# we want to set inheritable to True for its info_association. This allows for the default
# inheritance to be False for each level in the Library hierarchy unless we're creating a new
# level in the hierarchy, in which case we'll inherit the "inheritable" setting from the parent
# level.
info_association = trans.app.model.LibraryFolderInfoAssociation( item, template, form_values, inheritable=inherited )
trans.sa_session.add( info_association )
trans.sa_session.flush()
elif item_type == 'ldda':
info_association = trans.app.model.LibraryDatasetDatasetInfoAssociation( item, template, form_values )
trans.sa_session.add( info_association )
trans.sa_session.flush()
message = 'The information has been updated.'
if cntrller == 'api':
return 200, message
new_kwd = dict( action=action,
cntrller=cntrller,
id=id,
message=util.sanitize_text( message ),
status='done' )
if in_library:
new_kwd.update( dict( controller='library_common',
use_panels=use_panels,
library_id=library_id,
folder_id=folder_id,
show_deleted=show_deleted ) )
if in_sample_tracking:
new_kwd.update( dict( controller='requests_common',
cntrller='requests_admin',
id=trans.security.encode_id( sample.id ),
sample_id=sample_id ) )
return trans.response.send_redirect( web.url_for( **new_kwd ) )
@web.expose
def delete_template( self, trans, cntrller, item_type, form_type, **kwd ):
params = util.Params( kwd )
# form_type must be one of: RUN_DETAILS_TEMPLATE, LIBRARY_INFO_TEMPLATE
in_library = form_type == trans.model.FormDefinition.types.LIBRARY_INFO_TEMPLATE
in_sample_tracking = form_type == trans.model.FormDefinition.types.RUN_DETAILS_TEMPLATE
if in_library:
is_admin = ( trans.user_is_admin() and cntrller == 'library_admin' )
current_user_roles = trans.get_current_user_roles()
show_deleted = util.string_as_bool( params.get( 'show_deleted', False ) )
use_panels = util.string_as_bool( params.get( 'use_panels', False ) )
library_id = params.get( 'library_id', None )
folder_id = params.get( 'folder_id', None )
ldda_id = params.get( 'ldda_id', None )
elif in_sample_tracking:
request_type_id = params.get( 'request_type_id', None )
sample_id = params.get( 'sample_id', None )
#id = params.get( 'id', None )
message = util.restore_text( params.get( 'message', '' ) )
try:
if in_library:
item, item_desc, action, id = self.get_item_and_stuff( trans,
item_type=item_type,
library_id=library_id,
folder_id=folder_id,
ldda_id=ldda_id,
is_admin=is_admin )
elif in_sample_tracking:
item, item_desc, action, id = self.get_item_and_stuff( trans,
item_type=item_type,
request_type_id=request_type_id,
sample_id=sample_id )
except ValueError:
return None
if in_library:
if not ( is_admin or trans.app.security_agent.can_modify_library_item( current_user_roles, item ) ):
message = "You are not authorized to modify %s '%s'." % ( item_desc, item.name )
return trans.response.send_redirect( web.url_for( controller='library_common',
action='browse_library',
cntrller=cntrller,
id=library_id,
show_deleted=show_deleted,
message=util.sanitize_text( message ),
status='error' ) )
if in_library:
info_association, inherited = item.get_info_association()
elif in_sample_tracking:
info_association = item.run_details
if not info_association:
message = "There is no template for this %s" % item_type
else:
if in_library:
info_association.deleted = True
trans.sa_session.add( info_association )
trans.sa_session.flush()
elif in_sample_tracking:
trans.sa_session.delete( info_association )
trans.sa_session.flush()
message = 'The template for this %s has been deleted.' % item_type
new_kwd = dict( action=action,
cntrller=cntrller,
id=id,
message=util.sanitize_text( message ),
status='done' )
if in_library:
new_kwd.update( dict( controller='library_common',
use_panels=use_panels,
library_id=library_id,
folder_id=folder_id,
show_deleted=show_deleted ) )
return trans.response.send_redirect( web.url_for( **new_kwd ) )
if in_sample_tracking:
new_kwd.update( dict( controller='request_type',
request_type_id=request_type_id,
sample_id=sample_id ) )
return trans.response.send_redirect( web.url_for( **new_kwd ) )
def widget_fields_have_contents( self, widgets ):
        # Return True if any of the fields in widgets contain contents.  widgets is a list of
        # dictionaries that looks something like:
        # [{'widget': <galaxy.web.form_builder.TextField object at 0x10867aa10>, 'helptext': 'Field 0 help (Optional)', 'label': 'Field 0'}]
        for field in widgets:
if ( isinstance( field[ 'widget' ], TextArea ) or isinstance( field[ 'widget' ], TextField ) ) and field[ 'widget' ].value:
return True
if isinstance( field[ 'widget' ], SelectField ) and field[ 'widget' ].options:
for option_label, option_value, selected in field[ 'widget' ].options:
if selected:
return True
if isinstance( field[ 'widget' ], CheckboxField ) and field[ 'widget' ].checked:
return True
if isinstance( field[ 'widget' ], WorkflowField ) and str( field[ 'widget' ].value ).lower() not in [ 'none' ]:
return True
if isinstance( field[ 'widget' ], WorkflowMappingField ) and str( field[ 'widget' ].value ).lower() not in [ 'none' ]:
return True
if isinstance( field[ 'widget' ], HistoryField ) and str( field[ 'widget' ].value ).lower() not in [ 'none' ]:
return True
if isinstance( field[ 'widget' ], AddressField ) and str( field[ 'widget' ].value ).lower() not in [ 'none' ]:
return True
return False
def clean_field_contents( self, widgets, **kwd ):
field_contents = {}
for index, widget_dict in enumerate( widgets ):
widget = widget_dict[ 'widget' ]
value = kwd.get( widget.name, '' )
if isinstance( widget, CheckboxField ):
# CheckboxField values are lists if the checkbox is checked
value = str( widget.is_checked( value ) ).lower()
elif isinstance( widget, AddressField ):
                # If the address was new, it has already been saved and widget.value is the new address.id
value = widget.value
field_contents[ widget.name ] = util.restore_text( value )
return field_contents
def field_param_values_ok( self, widget_name, widget_type, **kwd ):
# Make sure required fields have contents, etc
params = util.Params( kwd )
if widget_type == 'AddressField':
if not util.restore_text( params.get( '%s_short_desc' % widget_name, '' ) ) \
or not util.restore_text( params.get( '%s_name' % widget_name, '' ) ) \
or not util.restore_text( params.get( '%s_institution' % widget_name, '' ) ) \
or not util.restore_text( params.get( '%s_address' % widget_name, '' ) ) \
or not util.restore_text( params.get( '%s_city' % widget_name, '' ) ) \
or not util.restore_text( params.get( '%s_state' % widget_name, '' ) ) \
or not util.restore_text( params.get( '%s_postal_code' % widget_name, '' ) ) \
or not util.restore_text( params.get( '%s_country' % widget_name, '' ) ):
return False
return True
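    # Illustrative kwd layout checked by field_param_values_ok above for an AddressField
    # (the widget name 'field_0' and all values are hypothetical):
    #
    #     field_0_short_desc='Home', field_0_name='J. Doe', field_0_institution='...',
    #     field_0_address='...', field_0_city='...', field_0_state='...',
    #     field_0_postal_code='...', field_0_country='...'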
def save_widget_field( self, trans, field_obj, widget_name, **kwd ):
# Save a form_builder field object
params = util.Params( kwd )
if isinstance( field_obj, trans.model.UserAddress ):
field_obj.desc = util.restore_text( params.get( '%s_short_desc' % widget_name, '' ) )
field_obj.name = util.restore_text( params.get( '%s_name' % widget_name, '' ) )
field_obj.institution = util.restore_text( params.get( '%s_institution' % widget_name, '' ) )
field_obj.address = util.restore_text( params.get( '%s_address' % widget_name, '' ) )
field_obj.city = util.restore_text( params.get( '%s_city' % widget_name, '' ) )
field_obj.state = util.restore_text( params.get( '%s_state' % widget_name, '' ) )
field_obj.postal_code = util.restore_text( params.get( '%s_postal_code' % widget_name, '' ) )
field_obj.country = util.restore_text( params.get( '%s_country' % widget_name, '' ) )
field_obj.phone = util.restore_text( params.get( '%s_phone' % widget_name, '' ) )
trans.sa_session.add( field_obj )
trans.sa_session.flush()
def get_form_values( self, trans, user, form_definition, **kwd ):
'''
Returns the name:value dictionary containing all the form values
'''
params = util.Params( kwd )
values = {}
for index, field in enumerate( form_definition.fields ):
field_type = field[ 'type' ]
field_name = field[ 'name' ]
input_value = params.get( field_name, '' )
if field_type == AddressField.__name__:
input_text_value = util.restore_text( input_value )
if input_text_value == 'new':
# Save this new address in the list of this user's addresses
user_address = trans.model.UserAddress( user=user )
self.save_widget_field( trans, user_address, field_name, **kwd )
trans.sa_session.refresh( user )
field_value = int( user_address.id )
elif input_text_value in [ '', 'none', 'None', None ]:
field_value = ''
else:
field_value = int( input_text_value )
elif field_type == CheckboxField.__name__:
field_value = CheckboxField.is_checked( input_value )
elif field_type == PasswordField.__name__:
field_value = kwd.get( field_name, '' )
else:
field_value = util.restore_text( input_value )
values[ field_name ] = field_value
return values
def populate_widgets_from_kwd( self, trans, widgets, **kwd ):
# A form submitted via refresh_on_change requires us to populate the widgets with the contents of
# the form fields the user may have entered so that when the form refreshes the contents are retained.
params = util.Params( kwd )
populated_widgets = []
for widget_dict in widgets:
widget = widget_dict[ 'widget' ]
if params.get( widget.name, False ):
# The form included a field whose contents should be used to set the
# value of the current widget (widget.name is the name set by the
# user when they defined the FormDefinition).
if isinstance( widget, AddressField ):
value = util.restore_text( params.get( widget.name, '' ) )
if value == 'none':
value = ''
widget.value = value
widget_dict[ 'widget' ] = widget
# Populate the AddressField params with the form field contents
widget_params_dict = {}
for field_name, label, help_text in widget.fields():
form_param_name = '%s_%s' % ( widget.name, field_name )
widget_params_dict[ form_param_name ] = util.restore_text( params.get( form_param_name, '' ) )
widget.params = widget_params_dict
elif isinstance( widget, CheckboxField ):
# Check the value from kwd since util.Params would have
# stringify'd the list if the checkbox is checked.
value = kwd.get( widget.name, '' )
if CheckboxField.is_checked( value ):
widget.value = 'true'
widget_dict[ 'widget' ] = widget
elif isinstance( widget, SelectField ):
# Ensure the selected option remains selected.
value = util.restore_text( params.get( widget.name, '' ) )
processed_options = []
for option_label, option_value, option_selected in widget.options:
selected = value == option_value
processed_options.append( ( option_label, option_value, selected ) )
widget.options = processed_options
else:
widget.value = util.restore_text( params.get( widget.name, '' ) )
widget_dict[ 'widget' ] = widget
populated_widgets.append( widget_dict )
return populated_widgets
def get_item_and_stuff( self, trans, item_type, **kwd ):
# Return an item, description, action and an id based on the item_type. Valid item_types are
# library, folder, ldda, request_type, sample.
if item_type == 'library':
library_id = kwd.get( 'library_id', None )
id = library_id
try:
item = trans.sa_session.query( trans.app.model.Library ).get( trans.security.decode_id( library_id ) )
except:
item = None
item_desc = 'data library'
action = 'library_info'
elif item_type == 'folder':
folder_id = kwd.get( 'folder_id', None )
id = folder_id
try:
item = trans.sa_session.query( trans.app.model.LibraryFolder ).get( trans.security.decode_id( folder_id ) )
except:
item = None
item_desc = 'folder'
action = 'folder_info'
elif item_type == 'ldda':
ldda_id = kwd.get( 'ldda_id', None )
id = ldda_id
try:
item = trans.sa_session.query( trans.app.model.LibraryDatasetDatasetAssociation ).get( trans.security.decode_id( ldda_id ) )
except:
item = None
item_desc = 'dataset'
action = 'ldda_edit_info'
elif item_type == 'request_type':
request_type_id = kwd.get( 'request_type_id', None )
id = request_type_id
try:
item = trans.sa_session.query( trans.app.model.RequestType ).get( trans.security.decode_id( request_type_id ) )
except:
item = None
item_desc = 'request type'
action = 'view_editable_request_type'
elif item_type == 'sample':
sample_id = kwd.get( 'sample_id', None )
id = sample_id
try:
item = trans.sa_session.query( trans.app.model.Sample ).get( trans.security.decode_id( sample_id ) )
except:
item = None
item_desc = 'sample'
action = 'view_sample'
else:
item = None
#message = "Invalid item type ( %s )" % str( item_type )
item_desc = None
action = None
id = None
return item, item_desc, action, id
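# Illustrative usage sketch added for clarity (not part of the original controller); the
# encoded id below is hypothetical:
#     item, item_desc, action, id = self.get_item_and_stuff( trans, 'ldda', ldda_id=encoded_ldda_id )
# For an unrecognised item_type all four returned values are None.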
def build_form_id_select_field( self, trans, forms, selected_value='none' ):
return build_select_field( trans,
objs=forms,
label_attr='name',
select_field_name='form_id',
selected_value=selected_value,
refresh_on_change=True )
class SharableMixin:
""" Mixin for a controller that manages an item that can be shared. """
# -- Implemented methods. --
def _is_valid_slug( self, slug ):
""" Returns true if slug is valid. """
return _is_valid_slug( slug )
@web.expose
@web.require_login( "share Galaxy items" )
def set_public_username( self, trans, id, username, **kwargs ):
""" Set user's public username and delegate to sharing() """
user = trans.get_user()
message = validate_publicname( trans, username, user )
if message:
return trans.fill_template( '/sharing_base.mako', item=self.get_item( trans, id ), message=message, status='error' )
user.username = username
trans.sa_session.flush()
return self.sharing( trans, id, **kwargs )
@web.expose
@web.require_login( "modify Galaxy items" )
def set_slug_async( self, trans, id, new_slug ):
item = self.get_item( trans, id )
if item:
# Only update slug if slug is not already in use.
if trans.sa_session.query( item.__class__ ).filter_by( user=item.user, slug=new_slug ).count() == 0:
item.slug = new_slug
trans.sa_session.flush()
return item.slug
def _make_item_accessible( self, sa_session, item ):
""" Makes item accessible--viewable and importable--and sets item's slug.
Does not flush/commit changes, however. Item must have name, user,
importable, and slug attributes. """
item.importable = True
self.create_item_slug( sa_session, item )
def create_item_slug( self, sa_session, item ):
""" Create/set item slug. Slug is unique among user's importable items
for item's class. Returns true if item's slug was set/changed; false
otherwise.
"""
cur_slug = item.slug
# Setup slug base.
if cur_slug is None or cur_slug == "":
# Item can have either a name or a title.
if hasattr( item, 'name' ):
item_name = item.name
elif hasattr( item, 'title' ):
item_name = item.title
slug_base = util.ready_name_for_url( item_name.lower() )
else:
slug_base = cur_slug
# Using slug base, find a slug that is not taken. If slug is taken,
# add integer to end.
new_slug = slug_base
count = 1
# Ensure unique across model class and user and don't include this item
# in the check in case it has previously been assigned a valid slug.
while sa_session.query( item.__class__ ).filter( item.__class__.user == item.user, item.__class__.slug == new_slug, item.__class__.id != item.id).count() != 0:
# Slug taken; choose a new slug based on count. This approach can
# handle numerous items with the same name gracefully.
new_slug = '%s-%i' % ( slug_base, count )
count += 1
# Set slug and return.
item.slug = new_slug
return item.slug != cur_slug
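# Illustrative note added for clarity (not part of the original controller): if the same
# user already owns an importable item slugged "rna-seq-workflow", calling
# create_item_slug() on a second item named "RNA Seq Workflow" would typically yield
# "rna-seq-workflow-1", because the while-loop above keeps appending an increasing
# integer until the slug is unique for that user and model class.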
# -- Abstract methods. --
@web.expose
@web.require_login( "share Galaxy items" )
def sharing( self, trans, id, **kwargs ):
""" Handle item sharing. """
raise "Unimplemented Method"
@web.expose
@web.require_login( "share Galaxy items" )
def share( self, trans, id=None, email="", **kwd ):
""" Handle sharing an item with a particular user. """
raise "Unimplemented Method"
@web.expose
def display_by_username_and_slug( self, trans, username, slug ):
""" Display item by username and slug. """
raise "Unimplemented Method"
@web.json
@web.require_login( "get item name and link" )
def get_name_and_link_async( self, trans, id=None ):
""" Returns item's name and link. """
raise "Unimplemented Method"
@web.expose
@web.require_login("get item content asynchronously")
def get_item_content_async( self, trans, id ):
""" Returns item content in HTML format. """
raise "Unimplemented Method"
def get_item( self, trans, id ):
""" Return item based on id. """
raise "Unimplemented Method"
class UsesQuotaMixin( object ):
def get_quota( self, trans, id, check_ownership=False, check_accessible=False, deleted=None ):
return self.get_object( trans, id, 'Quota', check_ownership=False, check_accessible=False, deleted=deleted )
class UsesTagsMixin( SharableItemSecurityMixin ):
def get_tag_handler( self, trans ):
return trans.app.tag_handler
def _get_user_tags( self, trans, item_class_name, id ):
user = trans.user
tagged_item = self._get_tagged_item( trans, item_class_name, id )
return [ tag for tag in tagged_item.tags if ( tag.user == user ) ]
def _get_tagged_item( self, trans, item_class_name, id, check_ownership=True ):
tagged_item = self.get_object( trans, id, item_class_name, check_ownership=check_ownership, check_accessible=True )
return tagged_item
def _remove_items_tag( self, trans, item_class_name, id, tag_name ):
"""Remove a tag from an item."""
user = trans.user
tagged_item = self._get_tagged_item( trans, item_class_name, id )
deleted = tagged_item and self.get_tag_handler( trans ).remove_item_tag( trans, user, tagged_item, tag_name )
trans.sa_session.flush()
return deleted
def _apply_item_tag( self, trans, item_class_name, id, tag_name, tag_value=None ):
user = trans.user
tagged_item = self._get_tagged_item( trans, item_class_name, id )
tag_assoc = self.get_tag_handler( trans ).apply_item_tag( trans, user, tagged_item, tag_name, tag_value )
trans.sa_session.flush()
return tag_assoc
def _get_item_tag_assoc( self, trans, item_class_name, id, tag_name ):
user = trans.user
tagged_item = self._get_tagged_item( trans, item_class_name, id )
log.debug( "In get_item_tag_assoc with tagged_item %s" % tagged_item )
return self.get_tag_handler( trans )._get_item_tag_assoc( user, tagged_item, tag_name )
def set_tags_from_list( self, trans, item, new_tags_list, user=None ):
# Method deprecated - try to use TagsHandler instead.
tags_manager = tags.TagsManager( trans.app )
return tags_manager.set_tags_from_list( trans, item, new_tags_list, user=user )
def get_user_tags_used( self, trans, user=None ):
"""
Return a list of distinct 'user_tname:user_value' strings that the
given user has used.
user defaults to trans.user.
Returns an empty list if no user is given and trans.user is anonymous.
"""
#TODO: for lack of a UsesUserMixin - placing this here - maybe into UsesTags, tho
user = user or trans.user
if not user:
return []
# get all the taggable model TagAssociations
tag_models = [ v.tag_assoc_class for v in trans.app.tag_handler.item_tag_assoc_info.values() ]
# create a union of subqueries for each for this user - getting only the tname and user_value
all_tags_query = None
for tag_model in tag_models:
subq = ( trans.sa_session.query( tag_model.user_tname, tag_model.user_value )
.filter( tag_model.user == user ) )
all_tags_query = subq if all_tags_query is None else all_tags_query.union( subq )
# if nothing init'd the query, bail
if all_tags_query is None:
return []
# boil the tag tuples down into a sorted list of DISTINCT name:val strings
tags = all_tags_query.distinct().all()
tags = [( ( name + ':' + val ) if val else name ) for name, val in tags ]
return sorted( tags )
class UsesExtendedMetadataMixin( SharableItemSecurityMixin ):
""" Mixin for getting and setting item extended metadata. """
def get_item_extended_metadata_obj( self, trans, item ):
"""
Given an item object (such as a LibraryDatasetDatasetAssociation), find the object
of the associated extended metadata
"""
if item.extended_metadata:
return item.extended_metadata
return None
def set_item_extended_metadata_obj( self, trans, item, extmeta_obj, check_writable=False):
if item.__class__ == LibraryDatasetDatasetAssociation:
if not check_writable or trans.app.security_agent.can_modify_library_item( trans.get_current_user_roles(), item, trans.user ):
item.extended_metadata = extmeta_obj
trans.sa_session.flush()
if item.__class__ == HistoryDatasetAssociation:
history = None
if check_writable:
history = self.security_check( trans, item, check_ownership=True, check_accessible=True )
else:
history = self.security_check( trans, item, check_ownership=False, check_accessible=True )
if history:
item.extended_metadata = extmeta_obj
trans.sa_session.flush()
def unset_item_extended_metadata_obj( self, trans, item, check_writable=False):
if item.__class__ == LibraryDatasetDatasetAssociation:
if not check_writable or trans.app.security_agent.can_modify_library_item( trans.get_current_user_roles(), item, trans.user ):
item.extended_metadata = None
trans.sa_session.flush()
if item.__class__ == HistoryDatasetAssociation:
history = None
if check_writable:
history = self.security_check( trans, item, check_ownership=True, check_accessible=True )
else:
history = self.security_check( trans, item, check_ownership=False, check_accessible=True )
if history:
item.extended_metadata = None
trans.sa_session.flush()
def create_extended_metadata(self, trans, extmeta):
"""
Create/index an extended metadata object. The returned object is
not associated with any items
"""
ex_meta = ExtendedMetadata(extmeta)
trans.sa_session.add( ex_meta )
trans.sa_session.flush()
for path, value in self._scan_json_block(extmeta):
meta_i = ExtendedMetadataIndex(ex_meta, path, value)
trans.sa_session.add(meta_i)
trans.sa_session.flush()
return ex_meta
def delete_extended_metadata( self, trans, item):
if item.__class__ == ExtendedMetadata:
trans.sa_session.delete( item )
trans.sa_session.flush()
def _scan_json_block(self, meta, prefix=""):
"""
Scan a json style data structure, and emit all fields and their values.
Example paths
Data
{ "data" : [ 1, 2, 3 ] }
Path:
/data[0] == 1
/data[1] == 2
/data[2] == 3
(only leaf values are yielded; list indices are appended to the path as "[i]")
"""
if isinstance(meta, dict):
for a in meta:
for path, value in self._scan_json_block(meta[a], prefix + "/" + a):
yield path, value
elif isinstance(meta, list):
for i, a in enumerate(meta):
for path, value in self._scan_json_block(a, prefix + "[%d]" % (i)):
yield path, value
else:
#BUG: Everything is cast to string, which can lead to false positives
#for cross type comparisons, i.e. "True" == True
yield prefix, ("%s" % (meta)).encode("utf8", errors='replace')
"""
Deprecated: `BaseController` used to be available under the name `Root`
"""
class ControllerUnavailable( Exception ):
pass
## ---- Utility methods -------------------------------------------------------
def sort_by_attr( seq, attr ):
"""
Sort the sequence of objects by object's attribute
Arguments:
seq - the list or any sequence (including immutable one) of objects to sort.
attr - the name of attribute to sort by
"""
# Use the "Schwartzian transform"
# Create the auxiliary list of tuples where every i-th tuple has form
# (seq[i].attr, i, seq[i]) and sort it. The second item of tuple is needed not
# only to provide stable sorting, but mainly to eliminate comparison of objects
# (which can be expensive or prohibited) in case of equal attribute values.
intermed = map( None, map( getattr, seq, ( attr, ) * len( seq ) ), xrange( len( seq ) ), seq )
intermed.sort()
return map( operator.getitem, intermed, ( -1, ) * len( intermed ) )
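# Illustrative usage sketch added for clarity (not part of the original module), assuming
# a list of objects that expose a ``name`` attribute:
#     sorted_libraries = sort_by_attr( libraries, 'name' )
# The (attr_value, index, object) tuples built above keep the sort stable and avoid
# comparing the objects themselves when two attribute values are equal.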
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/web/base/controller.py
|
Python
|
gpl-3.0
| 142,060
|
[
"Galaxy"
] |
be111a634b6344ae2899b8ff385f867b435aae7e217cafa206ff9a08b874cbd7
|
#!/bin/env python
""" dummy testing for RequestContainer
:deprecated:
"""
import xml.dom.minidom
from DIRAC.RequestManagementSystem.Client.RequestContainer import RequestContainer
def getRequest( operation ):
""" fake requestDict
:param str operation: sub-request operation attribute
"""
requestContainer = RequestContainer( init = False )
requestContainer.setJobID( 1 )
requestContainer.setOwnerDN( "/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=cibak/CN=605919/CN=Krzysztof Ciba" )
requestContainer.setOwnerGroup( "lhcb_user" )
requestContainer.setDIRACSetup( "LHCb-Production" )
requestContainer.setSourceComponent( None )
requestContainer.setCreationTime( "0000-00-00 00:00:00" )
requestContainer.setLastUpdate( "2011-12-01 04:57:02" )
requestContainer.setStatus( "Waiting" )
requestContainer.setAttribute( "RequestID", 123456789 )
requestContainer.initiateSubRequest( "transfer" )
subRequestDict = { "Status" : "Waiting",
"SubRequestID" : 2222222,
"Operation" : operation,
"Arguments" : None,
"ExecutionOrder" : 0,
"SourceSE" : None,
"TargetSE" : "CERN-USER,PIC-USER",
"Catalogue" : "LcgFileCatalogCombined",
"CreationTime" : "2011-12-01 04:57:02",
"SubmissionTime" : "2011-12-01 04:57:02",
"LastUpdate" : "2011-12-01 20:14:22" }
requestContainer.setSubRequestAttributes( 0, "transfer", subRequestDict )
files = [ { "FileID" : 3333333,
"LFN" : "/lhcb/user/c/cibak/11889/11889410/test.zzz",
"Size" : 44444444,
"PFN" : "srm://srm-lhcb.gridpp.rl.ac.uk/castor/ads.rl.ac.uk/prod/lhcb/user/c/cibak/11889/11889410/test.zzz",
"GUID" : "5P13RD4L-4J5L-3D21-U5P1-3RD4L4J5P13R",
"Md5" : None,
"Addler" : "92b85e26",
"Attempt" : 1,
"Status" : "Waiting" } ]
requestContainer.setSubRequestFiles( 0, "transfer", files )
return requestContainer
if __name__ == "__main__":
req = getRequest( "replicateAndRegister" )
xmlDoc_NEW = req.toXML_new( "transfer" )["Value"]
xmlDoc_OLD = req.toXML( "transfer" )["Value"]
xmlDoc_OLD = xml.dom.minidom.parseString( xmlDoc_OLD )
xmlDoc_OLD.normalize()
xmlDoc_OLD = xmlDoc_OLD.toxml()
xmlDoc_NEW = xml.dom.minidom.parseString( xmlDoc_NEW ).toxml()
print len(xmlDoc_OLD)
print len(xmlDoc_NEW)
print xmlDoc_NEW
|
avedaee/DIRAC
|
RequestManagementSystem/test/RequestContainerTests.py
|
Python
|
gpl-3.0
| 2,567
|
[
"DIRAC"
] |
38c2b0db7ef83e29090cf1845622eef2ae12083ed351543f6c1f2b82c159540f
|
from selenium.common.exceptions import NoSuchElementException
from e2e.config import LMS_URL_ROOT, MARKETING_URL_ROOT
from e2e.helpers import EcommerceHelpers, LmsHelpers
import pytest # isort:skip
def test_login_and_logout(selenium):
""" Authenticating with the identity provider (LMS) should authenticate users for the E-Commerce Service. """
LmsHelpers.login(selenium)
# Visit the Otto dashboard to trigger a login
EcommerceHelpers.visit_dashboard(selenium)
# Logging out of Otto should redirect the user to the LMS logout page, which redirects
# to the marketing site (if available) or the LMS homepage.
EcommerceHelpers.logout(selenium)
assert selenium.current_url.strip('/') in [MARKETING_URL_ROOT, LMS_URL_ROOT]
def test_provider_logout(selenium):
""" Logging out of the identity provider should log the user out of the E-Commerce Service. """
LmsHelpers.login(selenium)
# Visit the Otto dashboard to trigger a login
EcommerceHelpers.visit_dashboard(selenium)
LmsHelpers.logout(selenium)
# Now that the user has been logged out, navigating to the dashboard should result in the user being
# redirected to the identity provider's login page. This indicates the user has been logged out of both systems.
try:
EcommerceHelpers.visit_dashboard(selenium)
except NoSuchElementException:
pass
else:
pytest.fail('Logging out of the identity provider should have also logged out of the E-Commerce Service!')
def test_login_redirection(selenium):
""" If the login process is initiated at the E-Commerce Service, a successful login should return the user to
the service. """
# Visit LMS login with next query param once to perform basic authentication
selenium.get(
LmsHelpers.build_url(
'login?next={}&skip_authn_mfe=true'.format(EcommerceHelpers.build_url('dashboard'))
)
)
LmsHelpers.submit_login_form(selenium)
EcommerceHelpers.assert_on_dashboard(selenium)
|
eduNEXT/edunext-ecommerce
|
e2e/test_auth.py
|
Python
|
agpl-3.0
| 2,030
|
[
"VisIt"
] |
4d37c5645fb8e2592cfc5765c0d1e4c2fa3b3d04d7692f5aee44a62a3b3e4d93
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkMetaImageReader(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkMetaImageReader(), 'Reading vtkMetaImage.',
(), ('vtkMetaImage',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
chrisidefix/devide
|
modules/vtk_basic/vtkMetaImageReader.py
|
Python
|
bsd-3-clause
| 488
|
[
"VTK"
] |
a07f625127176615d0f3997fba49c8e24523eeef5b7947d0d232bce9db4216c6
|
# Copyright 2008 Brian Boyer, Ryan Mark, Angela Nitzke, Joshua Pollock,
# Stuart Tiffen, Kayla Webley and the Medill School of Journalism, Northwestern
# University.
#
# This file is part of Crunchberry Pie.
#
# Crunchberry Pie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Crunchberry Pie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with Crunchberry Pie. If not, see <http://www.gnu.org/licenses/>.
from questions.models import Question,Answer
from django.contrib import admin
admin.site.register(Question)
admin.site.register(Answer)
|
brianboyer/newsmixer
|
social/questions/admin.py
|
Python
|
gpl-3.0
| 989
|
[
"Brian"
] |
52ffe15c42d6604395344c66580f1a72560fa349552ee628617faf6f5e46fbb0
|
#########################################################################################
#
# Script to populate the DIRAC FileCatalog with the information from the LFC
# FileCatalog using multiple LFC sources
#
# Author: A.Tsaregorodtsev
# Last Modified: 9.01.2012
#
#########################################################################################
from DIRAC.Core.Base import Script
Script.parseCommandLine()
import DIRAC.Resources.Catalog.LcgFileCatalogClient as LcgFileCatalogClient
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getUsernameForDN, getGroupsWithVOMSAttribute
from DIRAC.Core.Utilities.ThreadPool import ThreadPool, ThreadedJob
from DIRAC.Core.Utilities.ProcessPool import ProcessPool
from DIRAC import gConfig, S_OK, S_ERROR
from multiprocessing import Queue, Process, Value, Manager
import time, sys, random
dirCount = 0
fileCount = 0
globalStart = time.time()
dnCache = {}
roleCache = {}
def writer( filename, writerQueue, stopFlag ):
print "entering writer"
outputFile = open( filename, 'w' )
while not stopFlag.value or not writerQueue.empty():
outputFile.write( writerQueue.get() )
outputFile.close()
print "exciting writer stopValue %s" % stopFlag.value
def getUserNameAndGroup(info):
""" Get the user name and group from the DN and VOMS role
"""
global dnCache, roleCache
owner = {}
if not "OwnerDN" in info:
return owner
username = dnCache.get(info.get('OwnerDN'))
if not username:
result = getUsernameForDN(info.get('OwnerDN','Unknown'))
if result['OK']:
username = result['Value']
dnCache[info['OwnerDN']] = username
elif "No username" in result['Message']:
username = 'Unknown'
dnCache[info['OwnerDN']] = username
if username and username != 'Unknown':
groups = roleCache.get('/'+info.get('OwnerRole'))
if not groups:
groups = getGroupsWithVOMSAttribute('/'+info['OwnerRole'])
roleCache['/'+info['OwnerRole']] = groups
if groups:
owner['username'] = username
owner['group'] = groups[0]
return owner
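# Note added for clarity (not part of the original script): dnCache and roleCache memoise
# the Registry lookups, so each distinct OwnerDN and VOMS role is resolved at most once
# per worker process rather than once per directory entry.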
def processDir( initPath, writerQueue, recursive = False, host = None, fcInit = None, dfcInit = None ):
""" Process one directory, possibly recursively
"""
global globalStart, dnCache, roleCache
#print "AT >>> processDir initPath", initPath
fc = fcInit
if not fc:
fc = LcgFileCatalogClient.LcgFileCatalogClient( host=host )
dfc = dfcInit
if not dfc:
dfc = FileCatalogClient()
start = time.time()
resultList = fc.listDirectory(initPath,True)
if not resultList['OK']:
result = S_ERROR("Failed LFC lookup for %s" % initPath)
result['Path'] = initPath
return result
lfc_time = (time.time() - start)
s = time.time()
if resultList['OK']:
# Add directories
if resultList['Value']['Failed']:
result = S_ERROR( "Path %s failed: %s" % ( initPath, resultList['Value']['Failed'][initPath] ) )
return result
dirDict = resultList['Value']['Successful'][initPath]['SubDirs']
paths = {}
for path,info in dirDict.items():
paths[path] = {}
paths[path]['Mode'] = info['Mode']
owner = getUserNameAndGroup( info )
if owner:
paths[path]['Owner'] = owner
p_dirs = time.time() - s
s = time.time()
nDir = len(paths)
if nDir:
# print "Adding %d directories in %s" % ( nDir, initPath )
result = dfc.createDirectory(paths)
if not result['OK']:
print "Error adding directories:%s" % result['Message']
e_dirs = time.time() - s
# Add files
s = time.time()
fileDict = resultList['Value']['Successful'][initPath]['Files']
lfns = {}
for lfn,info in fileDict.items():
#print info['MetaData']
lfns[lfn] = {}
lfns[lfn]['Size'] = info['MetaData']['Size']
lfns[lfn]['Checksum'] = info['MetaData'].get('Checksum','')
lfns[lfn]['GUID'] = info['MetaData']['GUID']
lfns[lfn]['Mode'] = info['MetaData']['Mode']
lfns[lfn]['PFN'] = ''
owner = getUserNameAndGroup( info['MetaData'] )
if owner:
lfns[lfn]['Owner'] = owner
if info['Replicas']:
seList = info['Replicas'].keys()
lfns[lfn]['SE'] = seList
p_files = time.time() - s
s = time.time()
nFile = len(lfns)
nRep = 0
if nFile:
for lfn in lfns:
if 'SE' in lfns[lfn]:
nRep += len(lfns[lfn]['SE'])
# print "Adding %d files in %s" % ( nFile, initPath )
done = False
count = 0
error = False
while not done:
count += 1
result = dfc.addFile(lfns)
if not result['OK']:
print "Error adding files %d:" % count, result['Message']
if count > 10:
print "Completely failed path %s" % initPath
break
error = True
time.sleep(2)
elif error:
print "Successfully added files on retry %d" % count
done = True
else:
done = True
e_files = time.time() - s
dfc_time = time.time() - start - lfc_time
total_time = time.time() - globalStart
format = "== %s: time lfc/dfc %.2f/%.2f, files %d, dirs %d, reps %d, time: %.2f/%.2f/%.2f/%.2f %.2f \n"
writerQueue.put( format % ( initPath, lfc_time, dfc_time, nFile, nDir, nRep, p_dirs, e_dirs, p_files, e_files, total_time ) )
# outputFile = open('lfc_dfc.out','a')
# outputFile.write( format % (initPath,lfc_time,dfc_time,nFile,nDir,nRep,p_dirs,e_dirs,p_files,e_files,total_time) )
# outputFile.close()
# print format % (initPath,lfc_time,dfc_time,nFile,fileCount,nDir,dirCount,p_dirs,e_dirs,p_files,e_files,total_time)
# Go into directories
if recursive:
for path in paths:
result = processDir( path , writerQueue, recursive = True, host = host, fcInit = fc, dfcInit = dfc )
if result['OK']:
nFile += result['Value'].get('NumberOfFiles',0)
nDir += result['Value'].get('NumberOfDirectories',0)
nRep += result['Value'].get('NumberOfReplicas',0)
resultDict = {}
resultDict['NumberOfFiles'] = nFile
resultDict['NumberOfDirectories'] = nDir
resultDict['NumberOfReplicas'] = nRep
resultDict['Path'] = initPath
resultDict['Directories'] = dirDict.keys()
#print "AT >>> processDir",initPath,"done %.2f" % (time.time()-start)
toRet = S_OK( resultDict )
toRet['writerQueue'] = writerQueue
return toRet
def finalizeDirectory(task,result):
global lfcHosts, pPool
if result['OK']:
writerQueue = result['writerQueue']
print "Finished directory %(Path)s, dirs: %(NumberOfDirectories)s, files: %(NumberOfFiles)s, replicas: %(NumberOfReplicas)s" % result['Value']
print "%d active tasks remaining" % pPool.getNumWorkingProcesses()
if "Directories" in result['Value']:
for path in result['Value']['Directories']:
random.shuffle(lfcHosts)
#print pPool.getNumWorkingProcesses(), pPool.hasPendingTasks()
print "Queueing task for directory %s, lfc %s" % ( path, lfcHosts[0] )
result = pPool.createAndQueueTask( processDir, [path , writerQueue, False, lfcHosts[0]], callback = finalizeDirectory )
if not result['OK']:
print "Failed queueing %s" % path
else:
print "Task failed: %s" % result['Message']
if 'Path' in result:
random.shuffle(lfcHosts)
print "Requeueing task for directory %s, lfc %s" % ( result['Path'], lfcHosts[0] )
#########################################################################
pPool = ProcessPool(30,40,0)
manager = Manager()
writerQueue = manager.Queue()
stopFlag = Value( 'i', 0 )
#pPool.daemonize()
# lfcHosts = ['lfc-lhcb-ro.cern.ch',
# 'lfc-lhcb-ro.cr.cnaf.infn.it',
# 'lhcb-lfc-fzk.gridka.de',
# 'lfc-lhcb-ro.in2p3.fr',
# 'lfc-lhcb.grid.sara.nl',
# 'lfclhcb.pic.es',
# 'lhcb-lfc.gridpp.rl.ac.uk']
lfcHosts = ['prod-lfc-lhcb-ro.cern.ch']
# path = "/lhcb/LHCb"
path = '/lhcb/user/c/chaen'
print "Queueing task for directory", path, lfcHosts[0]
writerProc = Process( target = writer, args = ( 'lfc_dfc.out', writerQueue, stopFlag ) )
writerProc.start()
result = pPool.createAndQueueTask( processDir, [path , writerQueue, False, lfcHosts[0]], callback = finalizeDirectory )
if not result['OK']:
print "Failed queueing", path
for i in range(20):
pPool.processResults()
time.sleep(1)
pPool.processAllResults( timeout = 300 )
stopFlag.value = 1
writerQueue.put( "Exit" )
writerProc.join()
|
andresailer/DIRAC
|
DataManagementSystem/Utilities/lfc_dfc_copy.py
|
Python
|
gpl-3.0
| 8,617
|
[
"DIRAC",
"exciting"
] |
e213f56cd95be00a966219bc6d558613fc678412bbd9ddf1b9fceeb9f75f4e2c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Detect objects on the image."""
import copy
import numpy as np
import scipy.stats as st
import scipy.ndimage as ndimage
from scipy.ndimage.filters import gaussian_filter
from astropy.io import fits
from astropy.table import Table, Column
import sep
from kungpao.imtools import *
__all__ = ['sep_detection', 'simple_convolution_kernel', 'get_gaussian_kernel',
'detect_high_sb_objects', 'detect_low_sb_objects',
'obj_avg_mu', 'obj_peak_mu']
def simple_convolution_kernel(kernel):
"""Precomputed convolution kernel for the SEP detections."""
if kernel == 1:
# Tophat_3.0_3x3
convKer = np.asarray([[0.560000, 0.980000,
0.560000], [0.980000, 1.000000, 0.980000],
[0.560000, 0.980000, 0.560000]])
elif kernel == 2:
# Tophat_4.0_5x5
convKer = np.asarray(
[[0.000000, 0.220000, 0.480000, 0.220000,
0.000000], [0.220000, 0.990000, 1.000000, 0.990000, 0.220000],
[0.480000, 1.000000, 1.000000, 1.000000,
0.480000], [0.220000, 0.990000, 1.000000, 0.990000, 0.220000],
[0.000000, 0.220000, 0.480000, 0.220000, 0.000000]])
elif kernel == 3:
# Tophat_5.0_5x5
convKer = np.asarray(
[[0.150000, 0.770000, 1.000000, 0.770000,
0.150000], [0.770000, 1.000000, 1.000000, 1.000000, 0.770000],
[1.000000, 1.000000, 1.000000, 1.000000,
1.000000], [0.770000, 1.000000, 1.000000, 1.000000, 0.770000],
[0.150000, 0.770000, 1.000000, 0.770000, 0.150000]])
elif kernel == 4:
# Gaussian_3.0_5x5
convKer = np.asarray(
[[0.092163, 0.221178, 0.296069, 0.221178,
0.092163], [0.221178, 0.530797, 0.710525, 0.530797, 0.221178],
[0.296069, 0.710525, 0.951108, 0.710525,
0.296069], [0.221178, 0.530797, 0.710525, 0.530797, 0.221178],
[0.092163, 0.221178, 0.296069, 0.221178, 0.092163]])
elif kernel == 5:
# Gaussian_4.0_7x7
convKer = np.asarray([[
0.047454, 0.109799, 0.181612, 0.214776, 0.181612, 0.109799,
0.047454
], [
0.109799, 0.254053, 0.420215, 0.496950, 0.420215, 0.254053,
0.109799
], [
0.181612, 0.420215, 0.695055, 0.821978, 0.695055, 0.420215,
0.181612
], [
0.214776, 0.496950, 0.821978, 0.972079, 0.821978, 0.496950,
0.214776
], [
0.181612, 0.420215, 0.695055, 0.821978, 0.695055, 0.420215,
0.181612
], [
0.109799, 0.254053, 0.420215, 0.496950, 0.420215, 0.254053,
0.109799
], [
0.047454, 0.109799, 0.181612, 0.214776, 0.181612, 0.109799,
0.047454
]])
elif kernel == 6:
# Gaussian_5.0_9x9
convKer = np.asarray([[
0.030531, 0.065238, 0.112208, 0.155356, 0.173152, 0.155356,
0.112208, 0.065238, 0.030531
], [
0.065238, 0.139399, 0.239763, 0.331961, 0.369987, 0.331961,
0.239763, 0.139399, 0.065238
], [
0.112208, 0.239763, 0.412386, 0.570963, 0.636368, 0.570963,
0.412386, 0.239763, 0.112208
], [
0.155356, 0.331961, 0.570963, 0.790520, 0.881075, 0.790520,
0.570963, 0.331961, 0.155356
], [
0.173152, 0.369987, 0.636368, 0.881075, 0.982004, 0.881075,
0.636368, 0.369987, 0.173152
], [
0.155356, 0.331961, 0.570963, 0.790520, 0.881075, 0.790520,
0.570963, 0.331961, 0.155356
], [
0.112208, 0.239763, 0.412386, 0.570963, 0.636368, 0.570963,
0.412386, 0.239763, 0.112208
], [
0.065238, 0.139399, 0.239763, 0.331961, 0.369987, 0.331961,
0.239763, 0.139399, 0.065238
], [
0.030531, 0.065238, 0.112208, 0.155356, 0.173152, 0.155356,
0.112208, 0.065238, 0.030531
]])
else:
raise Exception("### More options will be available in the future")
return convKer
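# Illustrative note added for clarity (not part of the original module): for example,
# simple_convolution_kernel(4) returns the 5x5 "Gaussian_3.0" array, which can be passed
# to sep.extract(..., filter_kernel=...) as a matched filter, as sep_detection() does below.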
def get_gaussian_kernel(img_size, sig, theta=0.0, return_array=False, **kwargs):
"""Return a 2D Gaussian kernel array.
Wrapper of the astropy.convolution.Gaussian2DKernel class.
"""
from astropy.convolution import Gaussian2DKernel
if isinstance(sig, list):
x_sig, y_sig = sig[0], sig[1]
else:
x_sig = y_sig = sig
if isinstance(img_size, list):
x_size, y_size = img_size[0], img_size[1]
else:
x_size = y_size = img_size
kernel = Gaussian2DKernel(
x_stddev=x_sig, y_stddev=y_sig, theta=theta, x_size=x_size, y_size=y_size,
**kwargs)
if return_array:
return kernel.array
return kernel
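# Illustrative usage sketch added for clarity (not part of the original module):
#     kern = get_gaussian_kernel(7, 2.0, return_array=True)
# would return a 7x7 array sampling a circular Gaussian with sigma = 2 pixels; passing
# sig=[2.0, 1.0] makes the kernel elliptical.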
def sep_detection(img, threshold, kernel=4, err=None, use_sig=True,
subtract_bkg=True, return_bkg=True, return_seg=True,
bkg_kwargs=None, **det_kwargs):
"""Object detection using SEP.
Example of bkg_kwargs:
{'mask': None, 'bw': 100, 'bh': 100, 'fw': 100, 'fh': 100 }
Example of det_kwargs:
{'minarea': 10, 'deblend_nthreshs': 32,
'deblend_conts': 0.0001, 'filter_type': 'matched'}
"""
# Determine the kernel used in detection
if isinstance(kernel, int):
filter_kernel = simple_convolution_kernel(kernel)
elif isinstance(kernel, (list, tuple, np.ndarray)):
filter_kernel = get_gaussian_kernel(kernel[0], kernel[1])
else:
raise Exception("Wrong choice for convolution kernel")
# Estimate background, subtract it if necessary
if bkg_kwargs is not None:
bkg, rms = img_measure_background(img, use_sep=True, **bkg_kwargs)
else:
bkg, rms = img_measure_background(img, use_sep=True)
if subtract_bkg:
img -= bkg
# If no error or variance array is provided, use the global rms of sky
if err is None:
threshold *= rms
# Make the detection using sigma or variance array
if use_sig:
results = sep.extract(img, threshold, err=err,
filter_kernel=filter_kernel,
segmentation_map=return_seg, **det_kwargs)
else:
results = sep.extract(img, threshold, var=err,
filter_kernel=filter_kernel,
segmentation_map=return_seg, **det_kwargs)
if return_seg:
obj, seg = results
if return_bkg:
return obj, seg, bkg
return obj, seg
else:
if return_bkg:
return results, bkg
return results
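# Illustrative usage sketch added for clarity (not part of the original module); the
# argument values are only examples:
#     obj, seg, bkg = sep_detection(img, threshold=1.5, kernel=4,
#                                   bkg_kwargs={'bw': 100, 'bh': 100},
#                                   minarea=10, deblend_cont=0.0001)
# With err=None the threshold of 1.5 is multiplied by the global sky RMS, so this detects
# pixels roughly 1.5 sigma above the (optionally subtracted) background.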
def obj_avg_mu(obj, pix=0.176, zero_point=27.0):
"""Get the average surface brightness of a SEP object."""
return -2.5 * np.log10(obj['flux'] /
(np.pi * obj['a'] * obj['b'] *
(pix ** 2))) + zero_point
def obj_peak_mu(obj, pix=0.176, zero_point=27.0):
"""Get the peak surface brightness of a SEP object."""
return -2.5 * np.log10(obj['cpeak'] /
(pix ** 2.0)) + zero_point
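# Note added for clarity (not part of the original module): both helpers evaluate
# mu = zero_point - 2.5 * log10(flux / area). obj_avg_mu uses the ellipse area
# pi * a * b * pix**2 (arcsec^2 when pix is in arcsec/pixel), while obj_peak_mu uses the
# peak counts over one pixel area pix**2, so both return mag per arcsec^2.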
def detect_high_sb_objects(img, sig, threshold=30.0, min_area=100, mask=None,
deb_thr_hsig=128, deb_cont_hsig=0.0001,
mu_limit=23.0, sig_hsig_1=0.1, sig_hsig_2=4.0,
verbose=False):
"""Detect all bright objects and mask them out."""
# Step 1: Detect bright objects on the image
'''
From Greco et al. 2018:
Next, we find very bright sources by flagging all pixels that are at least 28σ above the
global background level for each patch; for a typical patch, this corresponds to the
brightest ∼2% of all pixels.
The background and its variance are estimated using several iterations of sigma clipping.
In this work, we choose to detect two group of bright objects:
1: > 20 sigma, size > 200
2: > 15 sigma, size > 10000
'''
# Object detection: high threshold, relative small minimum size
obj_hsig, seg_hsig = sep.extract(img, threshold, err=sig,
minarea=min_area, mask=mask,
deblend_nthresh=deb_thr_hsig,
deblend_cont=deb_cont_hsig,
segmentation_map=True)
# Remove objects with low peak surface brightness
idx_low_peak_mu = []
obj_hsig = Table(obj_hsig)
for idx, obj in enumerate(obj_hsig):
if obj_peak_mu(obj) >= mu_limit:
seg_hsig[seg_hsig == (idx + 1)] = 0
idx_low_peak_mu.append(idx)
obj_hsig.remove_rows(idx_low_peak_mu)
if verbose:
print("# Keep %d high surface brightness objects" % len(obj_hsig))
# Generate a mask
msk_hsig = seg_to_mask(seg_hsig, sigma=sig_hsig_1, msk_max=1000.0, msk_thr=0.01)
msk_hsig_large = seg_to_mask(seg_hsig, sigma=sig_hsig_2, msk_max=1000.0, msk_thr=0.005)
return obj_hsig, msk_hsig, msk_hsig_large
def detect_low_sb_objects(img, threshold, sig, msk_hsig_1, msk_hsig_2, noise,
minarea=200, mask=None, deb_thr_lsig=64,
deb_cont_lsig=0.001, frac_mask=0.2, verbose=False):
"""Detect all the low threshold pixels."""
# Detect the low sigma pixels on the image
obj_lsig, seg_lsig = sep.extract(img, threshold, err=sig,
minarea=minarea, mask=mask,
deblend_nthresh=deb_thr_lsig,
deblend_cont=deb_cont_lsig,
segmentation_map=True)
obj_lsig = Table(obj_lsig)
obj_lsig.add_column(Column(data=(np.arange(len(obj_lsig)) + 1), name='index'))
if verbose:
print("# Detection %d low threshold objects" % len(obj_lsig))
x_mid = (obj_lsig['xmin'] + obj_lsig['xmax']) / 2.0
y_mid = (obj_lsig['ymin'] + obj_lsig['ymax']) / 2.0
# Remove the LSB objects whose centers fall on the high-threshold mask
seg_lsig_clean = copy.deepcopy(seg_lsig)
obj_lsig_clean = copy.deepcopy(obj_lsig)
img_lsig_clean = copy.deepcopy(img)
idx_remove = []
for idx, obj in enumerate(obj_lsig):
# xcen, ycen = int(obj['y']), int(obj['x'])
xmid, ymid = int(y_mid[idx]), int(x_mid[idx])
msk_hsig = (msk_hsig_1 | msk_hsig_2)
if (msk_hsig[xmid, ymid] > 0):
# Replace the segment with zero
seg_lsig_clean[seg_lsig == (idx + 1)] = 0
# Replace the image with noise
img_lsig_clean[seg_lsig == (idx + 1)] = noise[seg_lsig == (idx + 1)]
# Remove the object
idx_remove.append(idx)
obj_lsig_clean.remove_rows(idx_remove)
# Remove LSB objects whose segments overlap with the high-threshold mask
frac_msk = np.asarray([(msk_hsig_1[seg_lsig_clean == idx]).sum() /
np.asarray([seg_lsig_clean == idx]).sum()
for idx in obj_lsig_clean['index']])
idx_overlap = []
for index, idx in enumerate(obj_lsig_clean['index']):
if frac_msk[index] >= frac_mask:
# Replace the segment with zero
seg_lsig_clean[seg_lsig == idx] = 0
# Replace the image with noise
img_lsig_clean[seg_lsig == idx] = noise[seg_lsig == idx]
# Remove the object
idx_overlap.append(index)
return seg_lsig_clean, img_lsig_clean
|
dr-guangtou/KungPao
|
kungpao/detection.py
|
Python
|
gpl-3.0
| 11,610
|
[
"Gaussian"
] |
5b68372b958b4bf07e8d698e360913b8ae297a59ca6611a4a2d8f0e4ebe9e4af
|
# -*- coding: utf-8 -*-
"""
Regression tests for the Test Client, especially the customized assertions.
"""
from __future__ import unicode_literals
import os
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.core.urlresolvers import reverse
from django.template import (TemplateDoesNotExist, TemplateSyntaxError,
Context, Template, loader)
import django.template.context
from django.test import Client, TestCase
from django.test.client import encode_file, RequestFactory
from django.test.utils import ContextList, override_settings, str_prefix
from django.template.response import SimpleTemplateResponse
from django.utils._os import upath
from django.utils.translation import ugettext_lazy
from django.http import HttpResponse
@override_settings(
TEMPLATE_DIRS=(os.path.join(os.path.dirname(upath(__file__)), 'templates'),)
)
class AssertContainsTests(TestCase):
def test_contains(self):
"Responses can be inspected for content, including counting repeated substrings"
response = self.client.get('/test_client_regress/no_template_view/')
self.assertNotContains(response, 'never')
self.assertContains(response, 'never', 0)
self.assertContains(response, 'once')
self.assertContains(response, 'once', 1)
self.assertContains(response, 'twice')
self.assertContains(response, 'twice', 2)
try:
self.assertContains(response, 'text', status_code=999)
except AssertionError as e:
self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertContains(response, 'text', status_code=999, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'text', status_code=999)
except AssertionError as e:
self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'text', status_code=999, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'once')
except AssertionError as e:
self.assertIn("Response should not contain 'once'", str(e))
try:
self.assertNotContains(response, 'once', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response should not contain 'once'", str(e))
try:
self.assertContains(response, 'never', 1)
except AssertionError as e:
self.assertIn("Found 0 instances of 'never' in response (expected 1)", str(e))
try:
self.assertContains(response, 'never', 1, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 0 instances of 'never' in response (expected 1)", str(e))
try:
self.assertContains(response, 'once', 0)
except AssertionError as e:
self.assertIn("Found 1 instances of 'once' in response (expected 0)", str(e))
try:
self.assertContains(response, 'once', 0, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 1 instances of 'once' in response (expected 0)", str(e))
try:
self.assertContains(response, 'once', 2)
except AssertionError as e:
self.assertIn("Found 1 instances of 'once' in response (expected 2)", str(e))
try:
self.assertContains(response, 'once', 2, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 1 instances of 'once' in response (expected 2)", str(e))
try:
self.assertContains(response, 'twice', 1)
except AssertionError as e:
self.assertIn("Found 2 instances of 'twice' in response (expected 1)", str(e))
try:
self.assertContains(response, 'twice', 1, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 2 instances of 'twice' in response (expected 1)", str(e))
try:
self.assertContains(response, 'thrice')
except AssertionError as e:
self.assertIn("Couldn't find 'thrice' in response", str(e))
try:
self.assertContains(response, 'thrice', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Couldn't find 'thrice' in response", str(e))
try:
self.assertContains(response, 'thrice', 3)
except AssertionError as e:
self.assertIn("Found 0 instances of 'thrice' in response (expected 3)", str(e))
try:
self.assertContains(response, 'thrice', 3, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 0 instances of 'thrice' in response (expected 3)", str(e))
def test_unicode_contains(self):
"Unicode characters can be found in template context"
#Regression test for #10183
r = self.client.get('/test_client_regress/check_unicode/')
self.assertContains(r, 'さかき')
self.assertContains(r, b'\xe5\xb3\xa0'.decode('utf-8'))
def test_unicode_not_contains(self):
"Unicode characters can be searched for, and not found in template context"
#Regression test for #10183
r = self.client.get('/test_client_regress/check_unicode/')
self.assertNotContains(r, 'はたけ')
self.assertNotContains(r, b'\xe3\x81\xaf\xe3\x81\x9f\xe3\x81\x91'.decode('utf-8'))
def test_binary_contains(self):
r = self.client.get('/test_client_regress/check_binary/')
self.assertContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e')
with self.assertRaises(AssertionError):
self.assertContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e', count=2)
def test_binary_not_contains(self):
r = self.client.get('/test_client_regress/check_binary/')
self.assertNotContains(r, b'%ODF-1.4\r\n%\x93\x8c\x8b\x9e')
with self.assertRaises(AssertionError):
self.assertNotContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e')
def test_nontext_contains(self):
r = self.client.get('/test_client_regress/no_template_view/')
self.assertContains(r, ugettext_lazy('once'))
def test_nontext_not_contains(self):
r = self.client.get('/test_client_regress/no_template_view/')
self.assertNotContains(r, ugettext_lazy('never'))
def test_assert_contains_renders_template_response(self):
""" Test that we can pass in an unrendered SimpleTemplateReponse
without throwing an error.
Refs #15826.
"""
response = SimpleTemplateResponse(Template('Hello'), status=200)
self.assertContains(response, 'Hello')
def test_assert_contains_using_non_template_response(self):
""" Test that auto-rendering does not affect responses that aren't
instances (or subclasses) of SimpleTemplateResponse.
Refs #15826.
"""
response = HttpResponse('Hello')
self.assertContains(response, 'Hello')
def test_assert_not_contains_renders_template_response(self):
""" Test that we can pass in an unrendered SimpleTemplateReponse
without throwing an error.
Refs #15826.
"""
response = SimpleTemplateResponse(Template('Hello'), status=200)
self.assertNotContains(response, 'Bye')
def test_assert_not_contains_using_non_template_response(self):
""" Test that auto-rendering does not affect responses that aren't
instances (or subclasses) of SimpleTemplateResponse.
Refs #15826.
"""
response = HttpResponse('Hello')
self.assertNotContains(response, 'Bye')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AssertTemplateUsedTests(TestCase):
fixtures = ['testdata.json']
def test_no_context(self):
"Template usage assertions work then templates aren't in use"
response = self.client.get('/test_client_regress/no_template_view/')
# Check that the no template case doesn't mess with the template assertions
self.assertTemplateNotUsed(response, 'GET Template')
try:
self.assertTemplateUsed(response, 'GET Template')
except AssertionError as e:
self.assertIn("No templates used to render the response", str(e))
try:
self.assertTemplateUsed(response, 'GET Template', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: No templates used to render the response", str(e))
def test_single_context(self):
"Template assertions work when there is a single context"
response = self.client.get('/test_client/post_view/', {})
try:
self.assertTemplateNotUsed(response, 'Empty GET Template')
except AssertionError as e:
self.assertIn("Template 'Empty GET Template' was used unexpectedly in rendering the response", str(e))
try:
self.assertTemplateNotUsed(response, 'Empty GET Template', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Template 'Empty GET Template' was used unexpectedly in rendering the response", str(e))
try:
self.assertTemplateUsed(response, 'Empty POST Template')
except AssertionError as e:
self.assertIn("Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template", str(e))
try:
self.assertTemplateUsed(response, 'Empty POST Template', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template", str(e))
def test_multiple_context(self):
"Template assertions work when there are multiple contexts"
post_data = {
'text': 'Hello World',
'email': 'foo@example.com',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view_with_template/', post_data)
self.assertContains(response, 'POST data OK')
try:
self.assertTemplateNotUsed(response, "form_view.html")
except AssertionError as e:
self.assertIn("Template 'form_view.html' was used unexpectedly in rendering the response", str(e))
try:
self.assertTemplateNotUsed(response, 'base.html')
except AssertionError as e:
self.assertIn("Template 'base.html' was used unexpectedly in rendering the response", str(e))
try:
self.assertTemplateUsed(response, "Valid POST Template")
except AssertionError as e:
self.assertIn("Template 'Valid POST Template' was not a template used to render the response. Actual template(s) used: form_view.html, base.html", str(e))
class AssertRedirectsTests(TestCase):
def test_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/test_client/permanent_redirect_view/')
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError as e:
self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))
try:
self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))
def test_lost_query(self):
"An assertion is raised if the redirect location doesn't preserve GET parameters"
response = self.client.get('/test_client/redirect_view/', {'var': 'value'})
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError as e:
self.assertIn("Response redirected to 'http://testserver/test_client/get_view/?var=value', expected 'http://testserver/test_client/get_view/'", str(e))
try:
self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response redirected to 'http://testserver/test_client/get_view/?var=value', expected 'http://testserver/test_client/get_view/'", str(e))
def test_incorrect_target(self):
"An assertion is raised if the response redirects to another target"
response = self.client.get('/test_client/permanent_redirect_view/')
try:
# Should redirect to get_view
self.assertRedirects(response, '/test_client/some_view/')
except AssertionError as e:
self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))
def test_target_page(self):
"An assertion is raised if the response redirect target cannot be retrieved as expected"
response = self.client.get('/test_client/double_redirect_view/')
try:
# The redirect target responds with a 301 code, not 200
self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/')
except AssertionError as e:
self.assertIn("Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)", str(e))
try:
# The redirect target responds with a 301 code, not 200
self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)", str(e))
def test_redirect_chain(self):
"You can follow a redirect chain of multiple redirects"
response = self.client.get('/test_client_regress/redirects/further/more/', {}, follow=True)
self.assertRedirects(response, '/test_client_regress/no_template_view/',
status_code=301, target_status_code=200)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0], ('http://testserver/test_client_regress/no_template_view/', 301))
def test_multiple_redirect_chain(self):
"You can follow a redirect chain of multiple redirects"
response = self.client.get('/test_client_regress/redirects/', {}, follow=True)
self.assertRedirects(response, '/test_client_regress/no_template_view/',
status_code=301, target_status_code=200)
self.assertEqual(len(response.redirect_chain), 3)
self.assertEqual(response.redirect_chain[0], ('http://testserver/test_client_regress/redirects/further/', 301))
self.assertEqual(response.redirect_chain[1], ('http://testserver/test_client_regress/redirects/further/more/', 301))
self.assertEqual(response.redirect_chain[2], ('http://testserver/test_client_regress/no_template_view/', 301))
def test_redirect_chain_to_non_existent(self):
"You can follow a chain to a non-existent view"
response = self.client.get('/test_client_regress/redirect_to_non_existent_view2/', {}, follow=True)
self.assertRedirects(response, '/test_client_regress/non_existent_view/',
status_code=301, target_status_code=404)
def test_redirect_chain_to_self(self):
"Redirections to self are caught and escaped"
response = self.client.get('/test_client_regress/redirect_to_self/', {}, follow=True)
# The chain of redirects stops once the cycle is detected.
self.assertRedirects(response, '/test_client_regress/redirect_to_self/',
status_code=301, target_status_code=301)
self.assertEqual(len(response.redirect_chain), 2)
def test_circular_redirect(self):
"Circular redirect chains are caught and escaped"
response = self.client.get('/test_client_regress/circular_redirect_1/', {}, follow=True)
# The chain of redirects will get back to the starting point, but stop there.
self.assertRedirects(response, '/test_client_regress/circular_redirect_2/',
status_code=301, target_status_code=301)
self.assertEqual(len(response.redirect_chain), 4)
def test_redirect_chain_post(self):
"A redirect chain will be followed from an initial POST post"
response = self.client.post('/test_client_regress/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/test_client_regress/no_template_view/', 301, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_head(self):
"A redirect chain will be followed from an initial HEAD request"
response = self.client.head('/test_client_regress/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/test_client_regress/no_template_view/', 301, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_options(self):
"A redirect chain will be followed from an initial OPTIONS request"
response = self.client.options('/test_client_regress/redirects/',
follow=True)
self.assertRedirects(response,
'/test_client_regress/no_template_view/', 301, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_put(self):
"A redirect chain will be followed from an initial PUT request"
response = self.client.put('/test_client_regress/redirects/',
follow=True)
self.assertRedirects(response,
'/test_client_regress/no_template_view/', 301, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_delete(self):
"A redirect chain will be followed from an initial DELETE request"
response = self.client.delete('/test_client_regress/redirects/',
follow=True)
self.assertRedirects(response,
'/test_client_regress/no_template_view/', 301, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_to_different_host(self):
"The test client will preserve scheme, host and port changes"
response = self.client.get('/test_client_regress/redirect_other_host/', follow=True)
self.assertRedirects(response,
'https://otherserver:8443/test_client_regress/no_template_view/',
status_code=301, target_status_code=200)
# We can't use is_secure() or get_host()
# because response.request is a dictionary, not an HttpRequest
self.assertEqual(response.request.get('wsgi.url_scheme'), 'https')
self.assertEqual(response.request.get('SERVER_NAME'), 'otherserver')
self.assertEqual(response.request.get('SERVER_PORT'), '8443')
def test_redirect_chain_on_non_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/test_client/get_view/', follow=True)
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError as e:
self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
try:
self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
def test_redirect_on_non_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/test_client/get_view/')
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError as e:
self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
try:
self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
class AssertFormErrorTests(TestCase):
def test_unknown_form(self):
"An assertion is raised if the form name is unknown"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.')
except AssertionError as e:
self.assertIn("The form 'wrong_form' was not used to render the response", str(e))
try:
self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: The form 'wrong_form' was not used to render the response", str(e))
def test_unknown_field(self):
"An assertion is raised if the field name is unknown"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'some_field', 'Some error.')
except AssertionError as e:
self.assertIn("The form 'form' in context 0 does not contain the field 'some_field'", str(e))
try:
self.assertFormError(response, 'form', 'some_field', 'Some error.', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: The form 'form' in context 0 does not contain the field 'some_field'", str(e))
def test_noerror_field(self):
"An assertion is raised if the field doesn't have any errors"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'value', 'Some error.')
except AssertionError as e:
self.assertIn("The field 'value' on form 'form' in context 0 contains no errors", str(e))
try:
self.assertFormError(response, 'form', 'value', 'Some error.', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: The field 'value' on form 'form' in context 0 contains no errors", str(e))
def test_unknown_error(self):
"An assertion is raised if the field doesn't contain the provided error"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'email', 'Some error.')
except AssertionError as e:
self.assertIn(str_prefix("The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [%(_)s'Enter a valid email address.'])"), str(e))
try:
self.assertFormError(response, 'form', 'email', 'Some error.', msg_prefix='abc')
except AssertionError as e:
self.assertIn(str_prefix("abc: The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [%(_)s'Enter a valid email address.'])"), str(e))
def test_unknown_nonfield_error(self):
"""
        Checks that an assertion is raised if the form's non-field errors
        don't contain the provided error.
"""
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', None, 'Some error.')
except AssertionError as e:
self.assertIn("The form 'form' in context 0 does not contain the non-field error 'Some error.' (actual errors: )", str(e))
try:
self.assertFormError(response, 'form', None, 'Some error.', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: The form 'form' in context 0 does not contain the non-field error 'Some error.' (actual errors: )", str(e))
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class LoginTests(TestCase):
fixtures = ['testdata']
def test_login_different_client(self):
"Check that using a different test client doesn't violate authentication"
# Create a second client, and log in.
c = Client()
login = c.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Get a redirection page with the second client.
response = c.get("/test_client_regress/login_protected_redirect_view/")
        # At this point, self.client isn't logged in.
# Check that assertRedirects uses the original client, not the
# default client.
self.assertRedirects(response, "http://testserver/test_client_regress/get_view/")
@override_settings(
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
SESSION_ENGINE='regressiontests.test_client_regress.session'
)
class SessionEngineTests(TestCase):
fixtures = ['testdata']
def test_login(self):
"A session engine that modifies the session key can be used to log in"
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Try to access a login protected page.
response = self.client.get("/test_client/login_protected_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
class URLEscapingTests(TestCase):
def test_simple_argument_get(self):
"Get a view that has a simple string argument"
response = self.client.get(reverse('arg_view', args=['Slartibartfast']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'Howdy, Slartibartfast')
def test_argument_with_space_get(self):
"Get a view that has a string argument that requires escaping"
response = self.client.get(reverse('arg_view', args=['Arthur Dent']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'Hi, Arthur')
def test_simple_argument_post(self):
"Post for a view that has a simple string argument"
response = self.client.post(reverse('arg_view', args=['Slartibartfast']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'Howdy, Slartibartfast')
def test_argument_with_space_post(self):
"Post for a view that has a string argument that requires escaping"
response = self.client.post(reverse('arg_view', args=['Arthur Dent']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'Hi, Arthur')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class ExceptionTests(TestCase):
fixtures = ['testdata.json']
def test_exception_cleared(self):
"#5836 - A stale user exception isn't re-raised by the test client."
login = self.client.login(username='testclient',password='password')
self.assertTrue(login, 'Could not log in')
try:
response = self.client.get("/test_client_regress/staff_only/")
self.fail("General users should not be able to visit this page")
except SuspiciousOperation:
pass
# At this point, an exception has been raised, and should be cleared.
# This next operation should be successful; if it isn't we have a problem.
login = self.client.login(username='staff', password='password')
self.assertTrue(login, 'Could not log in')
try:
self.client.get("/test_client_regress/staff_only/")
except SuspiciousOperation:
self.fail("Staff should be able to visit this page")
class TemplateExceptionTests(TestCase):
def setUp(self):
# Reset the loaders so they don't try to render cached templates.
if loader.template_source_loaders is not None:
for template_loader in loader.template_source_loaders:
if hasattr(template_loader, 'reset'):
template_loader.reset()
@override_settings(
TEMPLATE_DIRS=(os.path.join(os.path.dirname(upath(__file__)), 'bad_templates'),)
)
def test_bad_404_template(self):
"Errors found when rendering 404 error templates are re-raised"
try:
response = self.client.get("/no_such_view/")
self.fail("Should get error about syntax error in template")
except TemplateSyntaxError:
pass
# We need two different tests to check URLconf substitution - one to check
# it was changed, and another one (without self.urls) to check it was reverted on
# teardown. This pair of tests relies upon the alphabetical ordering of test execution.
class UrlconfSubstitutionTests(TestCase):
urls = 'regressiontests.test_client_regress.urls'
def test_urlconf_was_changed(self):
"TestCase can enforce a custom URLconf on a per-test basis"
url = reverse('arg_view', args=['somename'])
self.assertEqual(url, '/arg_view/somename/')
# This test needs to run *after* UrlconfSubstitutionTests; the zz prefix in the
# name is to ensure alphabetical ordering.
class zzUrlconfSubstitutionTests(TestCase):
def test_urlconf_was_reverted(self):
"URLconf is reverted to original value after modification in a TestCase"
url = reverse('arg_view', args=['somename'])
self.assertEqual(url, '/test_client_regress/arg_view/somename/')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class ContextTests(TestCase):
fixtures = ['testdata']
def test_single_context(self):
"Context variables can be retrieved from a single context"
response = self.client.get("/test_client_regress/request_data/", data={'foo':'whiz'})
self.assertEqual(response.context.__class__, Context)
self.assertTrue('get-foo' in response.context)
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['request-foo'], 'whiz')
self.assertEqual(response.context['data'], 'sausage')
try:
response.context['does-not-exist']
self.fail('Should not be able to retrieve non-existent key')
except KeyError as e:
self.assertEqual(e.args[0], 'does-not-exist')
def test_inherited_context(self):
"Context variables can be retrieved from a list of contexts"
response = self.client.get("/test_client_regress/request_data_extended/", data={'foo':'whiz'})
self.assertEqual(response.context.__class__, ContextList)
self.assertEqual(len(response.context), 2)
self.assertTrue('get-foo' in response.context)
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['request-foo'], 'whiz')
self.assertEqual(response.context['data'], 'bacon')
try:
response.context['does-not-exist']
self.fail('Should not be able to retrieve non-existent key')
except KeyError as e:
self.assertEqual(e.args[0], 'does-not-exist')
def test_15368(self):
# Need to insert a context processor that assumes certain things about
# the request instance. This triggers a bug caused by some ways of
# copying RequestContext.
try:
django.template.context._standard_context_processors = (lambda request: {'path': request.special_path},)
response = self.client.get("/test_client_regress/request_context_view/")
self.assertContains(response, 'Path: /test_client_regress/request_context_view/')
finally:
django.template.context._standard_context_processors = None
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class SessionTests(TestCase):
fixtures = ['testdata.json']
def test_session(self):
"The session isn't lost if a user logs in"
# The session doesn't exist to start.
response = self.client.get('/test_client_regress/check_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'NO')
# This request sets a session variable.
response = self.client.get('/test_client_regress/set_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'set_session')
# Check that the session has been modified
response = self.client.get('/test_client_regress/check_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'YES')
# Log in
login = self.client.login(username='testclient',password='password')
self.assertTrue(login, 'Could not log in')
# Session should still contain the modified value
response = self.client.get('/test_client_regress/check_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'YES')
def test_logout(self):
"""Logout should work whether the user is logged in or not (#9978)."""
self.client.logout()
login = self.client.login(username='testclient',password='password')
self.assertTrue(login, 'Could not log in')
self.client.logout()
self.client.logout()
class RequestMethodTests(TestCase):
def test_get(self):
"Request a view via request method GET"
response = self.client.get('/test_client_regress/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: GET')
def test_post(self):
"Request a view via request method POST"
response = self.client.post('/test_client_regress/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: POST')
def test_head(self):
"Request a view via request method HEAD"
response = self.client.head('/test_client_regress/request_methods/')
self.assertEqual(response.status_code, 200)
# A HEAD request doesn't return any content.
self.assertNotEqual(response.content, b'request method: HEAD')
self.assertEqual(response.content, b'')
def test_options(self):
"Request a view via request method OPTIONS"
response = self.client.options('/test_client_regress/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: OPTIONS')
def test_put(self):
"Request a view via request method PUT"
response = self.client.put('/test_client_regress/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: PUT')
def test_delete(self):
"Request a view via request method DELETE"
response = self.client.delete('/test_client_regress/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: DELETE')
class RequestMethodStringDataTests(TestCase):
def test_post(self):
"Request a view with string data via request method POST"
# Regression test for #11371
data = '{"test": "json"}'
response = self.client.post('/test_client_regress/request_methods/', data=data, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: POST')
def test_put(self):
"Request a view with string data via request method PUT"
# Regression test for #11371
data = '{"test": "json"}'
response = self.client.put('/test_client_regress/request_methods/', data=data, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: PUT')
class QueryStringTests(TestCase):
def test_get_like_requests(self):
# See: https://code.djangoproject.com/ticket/10571.
for method_name in ('get', 'head'):
# A GET-like request can pass a query string as data
method = getattr(self.client, method_name)
response = method("/test_client_regress/request_data/", data={'foo':'whiz'})
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['request-foo'], 'whiz')
# A GET-like request can pass a query string as part of the URL
response = method("/test_client_regress/request_data/?foo=whiz")
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['request-foo'], 'whiz')
# Data provided in the URL to a GET-like request is overridden by actual form data
response = method("/test_client_regress/request_data/?foo=whiz", data={'foo':'bang'})
self.assertEqual(response.context['get-foo'], 'bang')
self.assertEqual(response.context['request-foo'], 'bang')
response = method("/test_client_regress/request_data/?foo=whiz", data={'bar':'bang'})
self.assertEqual(response.context['get-foo'], None)
self.assertEqual(response.context['get-bar'], 'bang')
self.assertEqual(response.context['request-foo'], None)
self.assertEqual(response.context['request-bar'], 'bang')
def test_post_like_requests(self):
# A POST-like request can pass a query string as data
response = self.client.post("/test_client_regress/request_data/", data={'foo':'whiz'})
self.assertEqual(response.context['get-foo'], None)
self.assertEqual(response.context['post-foo'], 'whiz')
# A POST-like request can pass a query string as part of the URL
response = self.client.post("/test_client_regress/request_data/?foo=whiz")
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['post-foo'], None)
self.assertEqual(response.context['request-foo'], 'whiz')
# POST data provided in the URL augments actual form data
response = self.client.post("/test_client_regress/request_data/?foo=whiz", data={'foo':'bang'})
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['post-foo'], 'bang')
self.assertEqual(response.context['request-foo'], 'bang')
response = self.client.post("/test_client_regress/request_data/?foo=whiz", data={'bar':'bang'})
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['get-bar'], None)
self.assertEqual(response.context['post-foo'], None)
self.assertEqual(response.context['post-bar'], 'bang')
self.assertEqual(response.context['request-foo'], 'whiz')
self.assertEqual(response.context['request-bar'], 'bang')
class UnicodePayloadTests(TestCase):
def test_simple_unicode_payload(self):
"A simple ASCII-only unicode JSON document can be POSTed"
# Regression test for #10571
json = '{"english": "mountain pass"}'
response = self.client.post("/test_client_regress/parse_unicode_json/", json,
content_type="application/json")
self.assertEqual(response.content, json.encode())
def test_unicode_payload_utf8(self):
"A non-ASCII unicode data encoded as UTF-8 can be POSTed"
# Regression test for #10571
json = '{"dog": "собака"}'
response = self.client.post("/test_client_regress/parse_unicode_json/", json,
content_type="application/json; charset=utf-8")
self.assertEqual(response.content, json.encode('utf-8'))
def test_unicode_payload_utf16(self):
"A non-ASCII unicode data encoded as UTF-16 can be POSTed"
# Regression test for #10571
json = '{"dog": "собака"}'
response = self.client.post("/test_client_regress/parse_unicode_json/", json,
content_type="application/json; charset=utf-16")
self.assertEqual(response.content, json.encode('utf-16'))
def test_unicode_payload_non_utf(self):
"A non-ASCII unicode data as a non-UTF based encoding can be POSTed"
#Regression test for #10571
json = '{"dog": "собака"}'
response = self.client.post("/test_client_regress/parse_unicode_json/", json,
content_type="application/json; charset=koi8-r")
self.assertEqual(response.content, json.encode('koi8-r'))
class DummyFile(object):
def __init__(self, filename):
self.name = filename
def read(self):
return b'TEST_FILE_CONTENT'
class UploadedFileEncodingTest(TestCase):
def test_file_encoding(self):
encoded_file = encode_file('TEST_BOUNDARY', 'TEST_KEY', DummyFile('test_name.bin'))
self.assertEqual(b'--TEST_BOUNDARY', encoded_file[0])
self.assertEqual(b'Content-Disposition: form-data; name="TEST_KEY"; filename="test_name.bin"', encoded_file[1])
self.assertEqual(b'TEST_FILE_CONTENT', encoded_file[-1])
def test_guesses_content_type_on_file_encoding(self):
self.assertEqual(b'Content-Type: application/octet-stream',
encode_file('IGNORE', 'IGNORE', DummyFile("file.bin"))[2])
self.assertEqual(b'Content-Type: text/plain',
encode_file('IGNORE', 'IGNORE', DummyFile("file.txt"))[2])
self.assertIn(encode_file('IGNORE', 'IGNORE', DummyFile("file.zip"))[2], (
b'Content-Type: application/x-compress',
b'Content-Type: application/x-zip',
b'Content-Type: application/x-zip-compressed',
b'Content-Type: application/zip',))
self.assertEqual(b'Content-Type: application/octet-stream',
encode_file('IGNORE', 'IGNORE', DummyFile("file.unknown"))[2])
class RequestHeadersTest(TestCase):
def test_client_headers(self):
"A test client can receive custom headers"
response = self.client.get("/test_client_regress/check_headers/", HTTP_X_ARG_CHECK='Testing 123')
self.assertEqual(response.content, b"HTTP_X_ARG_CHECK: Testing 123")
self.assertEqual(response.status_code, 200)
def test_client_headers_redirect(self):
"Test client headers are preserved through redirects"
response = self.client.get("/test_client_regress/check_headers_redirect/", follow=True, HTTP_X_ARG_CHECK='Testing 123')
self.assertEqual(response.content, b"HTTP_X_ARG_CHECK: Testing 123")
self.assertRedirects(response, '/test_client_regress/check_headers/',
status_code=301, target_status_code=200)
class ReadLimitedStreamTest(TestCase):
"""
Tests that ensure that HttpRequest.body, HttpRequest.read() and
HttpRequest.read(BUFFER) have proper LimitedStream behavior.
Refs #14753, #15785
"""
def test_body_from_empty_request(self):
"""HttpRequest.body on a test client GET request should return
the empty string."""
self.assertEqual(self.client.get("/test_client_regress/body/").content, b'')
def test_read_from_empty_request(self):
"""HttpRequest.read() on a test client GET request should return the
empty string."""
self.assertEqual(self.client.get("/test_client_regress/read_all/").content, b'')
def test_read_numbytes_from_empty_request(self):
"""HttpRequest.read(LARGE_BUFFER) on a test client GET request should
return the empty string."""
self.assertEqual(self.client.get("/test_client_regress/read_buffer/").content, b'')
def test_read_from_nonempty_request(self):
"""HttpRequest.read() on a test client PUT request with some payload
should return that payload."""
payload = b'foobar'
self.assertEqual(self.client.put("/test_client_regress/read_all/",
data=payload,
content_type='text/plain').content, payload)
def test_read_numbytes_from_nonempty_request(self):
"""HttpRequest.read(LARGE_BUFFER) on a test client PUT request with
some payload should return that payload."""
payload = b'foobar'
self.assertEqual(self.client.put("/test_client_regress/read_buffer/",
data=payload,
content_type='text/plain').content, payload)
class RequestFactoryStateTest(TestCase):
"""Regression tests for #15929."""
# These tests are checking that certain middleware don't change certain
# global state. Alternatively, from the point of view of a test, they are
# ensuring test isolation behavior. So, unusually, it doesn't make sense to
# run the tests individually, and if any are failing it is confusing to run
# them with any other set of tests.
def common_test_that_should_always_pass(self):
request = RequestFactory().get('/')
request.session = {}
self.assertFalse(hasattr(request, 'user'))
def test_request(self):
self.common_test_that_should_always_pass()
def test_request_after_client(self):
# apart from the next line the three tests are identical
self.client.get('/')
self.common_test_that_should_always_pass()
def test_request_after_client_2(self):
# This test is executed after the previous one
self.common_test_that_should_always_pass()
class RequestFactoryEnvironmentTests(TestCase):
"""
Regression tests for #8551 and #17067: ensure that environment variables
are set correctly in RequestFactory.
"""
def test_should_set_correct_env_variables(self):
request = RequestFactory().get('/path/')
self.assertEqual(request.META.get('REMOTE_ADDR'), '127.0.0.1')
self.assertEqual(request.META.get('SERVER_NAME'), 'testserver')
self.assertEqual(request.META.get('SERVER_PORT'), '80')
self.assertEqual(request.META.get('SERVER_PROTOCOL'), 'HTTP/1.1')
self.assertEqual(request.META.get('SCRIPT_NAME') +
request.META.get('PATH_INFO'), '/path/')
|
openhatch/new-mini-tasks
|
vendor/packages/Django/tests/regressiontests/test_client_regress/tests.py
|
Python
|
apache-2.0
| 48,934
|
[
"VisIt"
] |
52c035af4f011775fce69bf5c386bfa5270b06b336933e0a070a309f02cb79c8
|
# -*- coding: utf-8 -*-
#
# one_neuron_with_noise.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
One neuron with noise
----------------------
This script simulates a neuron with input from the ``poisson_generator``, and
records the neuron's membrane potential.
"""
###############################################################################
# First, we import all necessary modules needed to simulate, analyze and
# plot our example. Additionally, we set the verbosity to only show warnings
# and reset the kernel.
# Resetting the kernel removes any nodes we may have created previously and
# resets the internal clock to zero. This allows us to execute the script
# several times in a Python shell without interference from previous NEST
# simulations.
import nest
import nest.voltage_trace
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
###############################################################################
# Second, the nodes (the neuron, the two Poisson generators, and the
# voltmeter) are created using the ``Create`` function.
# We store the returned handles in variables for later reference.
neuron = nest.Create("iaf_psc_alpha")
noise = nest.Create("poisson_generator", 2)
voltmeter = nest.Create("voltmeter")
###############################################################################
# Third, the Poisson generators are configured using the ``set`` method, which
# expects a list of parameter dictionaries, one per node. We set the rates to
# 80,000 Hz and 15,000 Hz, matching the values passed below. Note that we do
# not need to set parameters for the neuron and the voltmeter, since they have
# satisfactory defaults.
noise.set([{"rate": 80000.0}, {"rate": 15000.0}])
###############################################################################
# Fourth, the neuron is connected to the ``poisson_generator`` and to the
# ``voltmeter``. We also specify the synaptic weight and delay in this step.
nest.Connect(noise, neuron, syn_spec={'weight': [[1.2, -1.0]], 'delay': 1.0})
nest.Connect(voltmeter, neuron)
###############################################################################
# Now we simulate the network using ``Simulate``, which takes the
# desired simulation time in milliseconds.
nest.Simulate(1000.0)
###############################################################################
# Finally, we plot the neuron's membrane potential as a function of
# time.
nest.voltage_trace.from_device(voltmeter)
nest.voltage_trace.show()
|
SepehrMN/nest-simulator
|
pynest/examples/one_neuron_with_noise.py
|
Python
|
gpl-2.0
| 3,147
|
[
"NEURON"
] |
66fd4a234190ecfbc0786b21623bcbb3b4bbd2fbb66ff6796c3ed52fc9c17528
|
from .check import assert_subclass
class Visitor(object):
def resolve_method(self, item):
for cls in item.__class__.__mro__:
name = 'visit_' + cls.__name__
method = getattr(self, name, None)
if method:
return method
raise ValueError('no method for {type!r}'.format(
type=type(item)
))
def visit(self, item):
return self.resolve_method(item)(item)
class TransformatorsComposition(Visitor):
def __init__(self, transformators):
for transformator in transformators:
assert_subclass(transformator, Visitor)
self.transformators = transformators
def __call__(self, item):
for transformator in self.transformators:
item = transformator()(item)
return item
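# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal example of how ``Visitor.resolve_method`` dispatches along the MRO.
# The ``Token``/``Word`` classes and ``NameVisitor`` are hypothetical names
# introduced only for this sketch.
class Token(object):
    pass

class Word(Token):
    pass

class NameVisitor(Visitor):
    # ``visit_Token`` also handles ``Word`` items, because ``Word.__mro__``
    # contains ``Token`` and resolve_method falls back along the MRO.
    def visit_Token(self, item):
        return type(item).__name__

# NameVisitor().visit(Word()) would return 'Word'; visiting an object whose
# MRO has no matching ``visit_*`` method raises ValueError.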
|
bureaucratic-labs/yargy
|
yargy/visitor.py
|
Python
|
mit
| 823
|
[
"VisIt"
] |
9da509c59cdd86973521c3147be16fa5c29f669daac4a268d1bb0a4995291247
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
## ##
## Copyright 2010-2012, Neil Wallace <neil@openmolar.com> ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###############################################################################
EXCEL_MESSAGE='''Error importing support for writing Microsoft Excel files
if you need this functionality, visit http://pypi.python.org/pypi/xlutils
and install the package'''
try:
import xlwt
AVAILABLE = True
except ImportError as e:
print EXCEL_MESSAGE
AVAILABLE = False
from tempfile import TemporaryFile
import types, datetime
from PyQt4 import QtCore
class XLS_Writer(object):
def __init__(self, filename):
self.filename = filename
self.book = xlwt.Workbook()
self.sheets = {}
self.add_sheet()
self.date_style = xlwt.easyxf(num_format_str="MMMM DD, YYYY")
self.time_style = xlwt.easyxf(num_format_str="HH:MM")
self.datetime_style = xlwt.easyxf(num_format_str="HH:MM MMMM DD, YYYY")
#self.current_sheet.col(1).set_style(style)
def add_sheet(self, sheetname = ""):
sheet_no = len(self.sheets)+1
if not sheetname:
sheetname = "Sheet %d"% sheet_no
new_sheet = self.book.add_sheet(sheetname)
self.current_sheet = new_sheet
self.sheets[sheet_no] = new_sheet
self.current_rowno = 0
def save(self):
self.book.save(self.filename)
self.book.save(TemporaryFile())
def writerow(self, row):
        write_list = [] # a list of (value, style) tuples
for val in row:
style = None
if type(val) in types.StringTypes:
val = unicode(val)
elif type(val) == datetime.date:
style = self.date_style
elif type(val) == QtCore.QVariant:
if val.type() == QtCore.QVariant.Int:
val = val.toInt()[0]
elif val.type() == QtCore.QVariant.Date:
val = val.toDate().toPyDate()
style = self.date_style
elif val.type() == QtCore.QVariant.Time:
val = val.toDateTime().toPyTime()
style = self.time_style
elif val.type() == QtCore.QVariant.DateTime:
val = val.toDateTime().toPyDateTime()
style = self.datetime_style
else:
val = unicode(val.toString())
else:
val = unicode(val)
write_list.append((val, style))
colno = 0
for item, style in write_list:
newrow = self.current_sheet.row(self.current_rowno)
print item
if not style:
newrow.write(colno, item)
else:
newrow.write(colno, item, style)
colno += 1
self.current_rowno += 1
def write_model(self, model):
s_list = [] #QtCore.QStringList()
for col_no in range(model.columnCount()):
item = model.headerData(col_no, QtCore.Qt.Horizontal)
s_list.append(item.toString())
self.writerow(s_list)
for row_no in range(model.rowCount()):
s_list = []# QtCore.QStringList()
for col_no in range(model.columnCount()):
index = model.index(row_no, col_no)
item = model.data(index)
s_list.append(item)
self.writerow(s_list)
self.save()
if __name__ == "__main__":
from datetime import date
rows = (
("Neil",date(2009,12,9)),
("Bea",date(1970,3,8)),
("Iona",date(1998,3,11)),
("Fraser", date(2000,11,10))
)
filename = "/home/neil/Desktop/test.xls"
writer = XLS_Writer(filename)
for row in rows:
writer.writerow(row)
writer.save()
|
rowinggolfer/openmolar2
|
src/lib_openmolar/common/import_export/export_xls.py
|
Python
|
gpl-3.0
| 5,111
|
[
"VisIt"
] |
f0f0c91a5db718c6e19880bf469e1e880677affdf2a0f457a677545bdc4364b4
|
"""
Basic functions for working with images.
"""
from __future__ import division, print_function, absolute_import
import itertools as itr
import numpy as np
def _import_skimage():
"""Import scikit-image, with slightly modified `ImportError` message"""
try:
import skimage
except ImportError:
raise ImportError("scikit-image is required to use this function.")
return skimage
def resize_by_factor(im, factor):
"""
Resizes the image according to a factor. The image is pre-filtered
with a Gaussian and then resampled with bilinear interpolation.
This function uses scikit-image and essentially combines its
`pyramid_reduce` with `pyramid_expand` into one function.
Returns the same object if factor is 1, not a copy.
Parameters
----------
im : ndarray, ndim=2 or 3
Image. Either 2D or 3D with 3 or 4 channels.
factor : float
Resize factor, e.g. a factor of 0.5 will halve both sides.
"""
_import_skimage()
from skimage.transform.pyramids import pyramid_reduce, pyramid_expand
if factor < 1:
return pyramid_reduce(im, downscale=1/factor)
elif factor > 1:
return pyramid_expand(im, upscale=factor)
else:
return im
def resize(im, shape=None, max_side=None, min_side=None):
    """
    Resizes the image to a target shape or so that one of its spatial sides
    matches a target length. Exactly one of `shape`, `max_side` or `min_side`
    should be given: `min_side`/`max_side` rescale the image so that its
    smaller/larger side matches that value, while `shape` resizes to
    (height, width), assuming roughly equal scale factors along both axes.
    """
    if min_side is not None:
min = np.min(im.shape[:2])
factor = min_side / min
return resize_by_factor(im, factor)
elif max_side is not None:
max = np.max(im.shape[:2])
factor = max_side / max
return resize_by_factor(im, factor)
else:
factor_y = shape[0] / im.shape[0]
factor_x = shape[1] / im.shape[1]
assert np.fabs(factor_x - factor_y) < 0.5
return resize_by_factor(im, factor_x)
def asgray(im):
"""
Takes an image and returns its grayscale version by averaging the color
    channels. If an alpha channel is present, it will simply be ignored. If a
grayscale image is given, the original image is returned.
Parameters
----------
    im : ndarray, ndim 2 or 3
RGB or grayscale image.
Returns
-------
gray_image : ndarray, ndim 2
Grayscale version of image.
"""
if im.ndim == 2:
return im
elif im.ndim == 3 and im.shape[2] in (3, 4):
return im[..., :3].mean(axis=-1)
else:
raise ValueError('Invalid image format')
def crop(im, size):
"""
Crops an image in the center.
Parameters
----------
size : tuple, (height, width)
        Final size after cropping.
"""
diff = [im.shape[index] - size[index] for index in (0, 1)]
im2 = im[diff[0]//2:diff[0]//2 + size[0], diff[1]//2:diff[1]//2 + size[1]]
return im2
def crop_to_bounding_box(im, bb):
"""
Crops according to a bounding box.
Parameters
----------
    bb : tuple, (top, left, bottom, right)
Crops inclusively for top/left and exclusively for bottom/right.
"""
return im[bb[0]:bb[2], bb[1]:bb[3]]
def load(path, dtype=np.float64):
"""
Loads an image from file.
Parameters
----------
path : str
Path to image file.
dtype : np.dtype
Defaults to ``np.float64``, which means the image will be returned as a
float with values between 0 and 1. If ``np.uint8`` is specified, the
values will be between 0 and 255 and no conversion cost will be
incurred.
"""
_import_skimage()
import skimage.io
im = skimage.io.imread(path)
if dtype == np.uint8:
return im
elif dtype in {np.float16, np.float32, np.float64}:
return im.astype(dtype) / 255
else:
raise ValueError('Unsupported dtype')
def save(path, im):
"""
Saves an image to file.
If the image is type float, it will assume to have values in [0, 1].
Parameters
----------
path : str
Path to which the image will be saved.
im : ndarray (image)
Image.
"""
from PIL import Image
if im.dtype == np.uint8:
pil_im = Image.fromarray(im)
else:
pil_im = Image.fromarray((im*255).astype(np.uint8))
pil_im.save(path)
def integrate(ii, r0, c0, r1, c1):
"""
Use an integral image to integrate over a given window.
Parameters
----------
ii : ndarray
Integral image.
r0, c0 : int
Top-left corner of block to be summed.
r1, c1 : int
Bottom-right corner of block to be summed.
Returns
-------
S : int
Integral (sum) over the given window.
"""
# This line is modified
S = np.zeros(ii.shape[-1])
S += ii[r1, c1]
if (r0 - 1 >= 0) and (c0 - 1 >= 0):
S += ii[r0 - 1, c0 - 1]
if (r0 - 1 >= 0):
S -= ii[r0 - 1, c1]
if (c0 - 1 >= 0):
S -= ii[r1, c0 - 1]
return S
def offset(img, offset, fill_value=0):
"""
Moves the contents of image without changing the image size. The missing
values are given a specified fill value.
Parameters
----------
img : array
Image.
offset : (vertical_offset, horizontal_offset)
Tuple of length 2, specifying the offset along the two axes.
fill_value : dtype of img
Fill value. Defaults to 0.
"""
sh = img.shape
if sh == (0, 0):
return img
else:
x = np.empty(sh)
x[:] = fill_value
x[max(offset[0], 0):min(sh[0]+offset[0], sh[0]),
max(offset[1], 0):min(sh[1]+offset[1], sh[1])] = \
img[max(-offset[0], 0):min(sh[0]-offset[0], sh[0]),
max(-offset[1], 0):min(sh[1]-offset[1], sh[1])]
return x
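# Illustrative aside (not part of the original module): offsetting a 3x3
# image by (1, 0) shifts its contents down one row and fills the vacated
# row with the fill value.
#
#   >>> offset(np.arange(9).reshape(3, 3), (1, 0))
#   array([[0., 0., 0.],
#          [0., 1., 2.],
#          [3., 4., 5.]])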
def bounding_box(alpha, threshold=0.1):
"""
Returns a bounding box of the support.
Parameters
----------
alpha : ndarray, ndim=2
Any one-channel image where the background has zero or low intensity.
threshold : float
The threshold that divides background from foreground.
Returns
-------
bounding_box : (top, left, bottom, right)
The bounding box describing the smallest rectangle containing the
foreground object, as defined by the threshold.
"""
assert alpha.ndim == 2
# Take the bounding box of the support, with a certain threshold.
supp_axs = [alpha.max(axis=1-i) for i in range(2)]
    # Take the first and last indices where the support exceeds the threshold
bb = [np.where(supp_axs[i] > threshold)[0][[0, -1]] for i in range(2)]
return (bb[0][0], bb[1][0], bb[0][1], bb[1][1])
def bounding_box_as_binary_map(alpha, threshold=0.1):
"""
Similar to `bounding_box`, except returns the bounding box as a
binary map the same size as the input.
Same parameters as `bounding_box`.
Returns
-------
binary_map : ndarray, ndim=2, dtype=np.bool_
Binary map with True if object and False if background.
"""
bb = bounding_box(alpha)
x = np.zeros(alpha.shape, dtype=np.bool_)
x[bb[0]:bb[2], bb[1]:bb[3]] = 1
return x
def extract_patches(images, patch_shape, samples_per_image=40, seed=0,
cycle=True):
"""
Takes a set of images and yields randomly chosen patches of specified size.
Parameters
----------
images : iterable
        The images have to be iterable, and each element must be a NumPy array
        with at least two spatial dimensions as the first and second axes.
patch_shape : tuple, length 2
The spatial shape of the patches that should be extracted. If the
images have further dimensions beyond the spatial, the patches will
copy these too.
samples_per_image : int
Samples to extract before moving on to the next image.
seed : int
Seed with which to select the patches.
cycle : bool
If True, then the function will produce patches indefinitely, by going
back to the first image when all are done. If False, the iteration will
stop when there are no more images.
Returns
-------
patch_generator
This function returns a generator that will produce patches.
Examples
--------
>>> import deepdish as dd
>>> import matplotlib.pylab as plt
>>> import itertools
>>> images = ag.io.load_example('mnist')
Now, let us say we want to exact patches from the these, where each patch
has at least some activity.
>>> gen = dd.image.extract_patches(images, (5, 5))
>>> gen = (x for x in gen if x.mean() > 0.1)
>>> patches = np.array(list(itertools.islice(gen, 25)))
>>> patches.shape
(25, 5, 5)
>>> dd.plot.images(patches)
>>> plt.show()
"""
rs = np.random.RandomState(seed)
for Xi in itr.cycle(images):
# How many patches could we extract?
w, h = [Xi.shape[i]-patch_shape[i] for i in range(2)]
assert w > 0 and h > 0
# Maybe shuffle an iterator of the indices?
indices = np.asarray(list(itr.product(range(w), range(h))))
rs.shuffle(indices)
for x, y in indices[:samples_per_image]:
yield Xi[x:x+patch_shape[0], y:y+patch_shape[1]]
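# --- Illustrative sanity check (not part of the original module) -------------
# Quick demonstration that ``integrate`` reproduces a direct window sum when
# given an integral image built with cumulative sums; the array shape is an
# arbitrary assumption. Runs only when the module is executed directly.
if __name__ == '__main__':
    _im = np.random.rand(8, 8, 3)
    _ii = _im.cumsum(axis=0).cumsum(axis=1)
    # Window covering rows 2..5 and columns 2..5, inclusive, per channel.
    assert np.allclose(integrate(_ii, 2, 2, 5, 5),
                       _im[2:6, 2:6].sum(axis=(0, 1)))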
|
agarbuno/deepdish
|
deepdish/image.py
|
Python
|
bsd-3-clause
| 9,113
|
[
"Gaussian"
] |
20e6f611ccf5d3b708363ee5b524642d0a88314c7d3c6ff737eb43d99e88a829
|
#!/usr/bin/env python
#PBS -N HooverNPT
#PBS -m ae
#PBS -q long
#PBS -l nodes=1:opteron
from numpy import *
from Scientific.Statistics import *
from asap3 import *
from asap3.md.npt import *
from asap3.md.langevin import *
from ase.md.velocitydistribution import *
from ase.lattice.cubic import FaceCenteredCubic
from asap3.testtools import ReportTest
import sys
T_goal = 300 # K
p_goal = 1 # GPa
bulk = 137 # GPa
cp = 24.43 # J / (K * mol)
beta = 3 * 16.5e-6 # 1/K
#step1 = 10000
step1 = 10000
step2 = 10000
trajfile = "npt.bundle"
if len(sys.argv) == 1:
out1 = "testNPT-1.out"
out2 = "testNPT-2.out"
ttime = 50
ptime = 75
elif len(sys.argv) == 3:
ttime = float(sys.argv[1])
ptime = float(sys.argv[2])
out1 = "testNPT-%d-%d-1.out" % (ttime, ptime)
out2 = "testNPT-%d-%d-2.out" % (ttime, ptime)
else:
    raise RuntimeError, "Expected zero or two parameters to the script!"
#ReportTest.Verbose()
atoms = FaceCenteredCubic(directions=((1,0,0), (0,1,0), (0,0,1)),
size=(15,15,15), symbol="Cu", pbc=True)
atoms.set_calculator(EMT())
print "Number of atoms:", len(atoms)
print "Heating to %d K using Langevin" % T_goal
lgv = Langevin(atoms, 5 * units.fs, temperature=2*T_goal*units.kB,
friction=0.01)
MaxwellBoltzmannDistribution(atoms, 0.9*T_goal*units.kB)
while atoms.get_kinetic_energy() < 1.5 * len(atoms) * T_goal * units.kB:
lgv.run(10)
T = atoms.get_kinetic_energy() / (1.5 * len(atoms) * units.kB)
print "Temperature is now %.2f K" % (T,)
print "Desired temperature reached!"
lgv.set_temperature(T_goal*units.kB)
for i in range(4):
lgv.run(100)
s = atoms.get_stress()
p = -(s[0] + s[1] + s[2])/3.0 / units.GPa
T = atoms.get_kinetic_energy() / (1.5 * len(atoms) * units.kB)
print "Pressure is %f GPa, desired pressure is %f GPa (T = %.2f K)" % (p, p_goal, T)
dv = (p - p_goal) / bulk
print "Adjusting volume by", dv
cell = atoms.get_cell()
atoms.set_cell(cell * (1.0 + dv/3.0), scale_atoms=True)
T = atoms.get_kinetic_energy() / (1.5 * len(atoms) * units.kB)
print "Temperature is now %.2f K" % (T,)
dyn = NPT(atoms, 5 * units.fs, T_goal * units.kB, p_goal * units.GPa,
ttime * units.fs, (ptime*units.fs)**2 * bulk * units.GPa)
traj = BundleTrajectory(trajfile, "w", atoms)
dyn.attach(traj, interval=50)
out = open(out1, "w")
temp = []
pres = []
vol = []
for i in xrange(step1):
dyn.run(5)
T = atoms.get_kinetic_energy() / (1.5 * len(atoms) * units.kB)
s = atoms.get_stress() / units.GPa
p = -(s[0] + s[1] + s[2])/3.0
out.write("%5.2f %7.5f %7.5f %7.5f %7.5f %7.5f %7.5f %7.5f\n" %
((T, p)+tuple(s)))
out.flush()
cell = atoms.get_cell()
v = cell[0,0] * cell[1,1] * cell[2,2]
temp.append(T)
pres.append(p)
vol.append(v)
if i % 10 == 0:
print i,"/",step1
temp = array(temp[len(temp)/2:])
pres = array(pres[len(pres)/2:])
vol = array(vol[len(vol)/2:])
print "Average temperature: %.5f K (standard deviation: %.5g K)" % (mean(temp), standardDeviation(temp))
ReportTest("Average temperature", mean(temp), T_goal,
4*standardDeviation(temp))
CV = cp * len(atoms) / 6.02205e23 / 1.60219e-19 # eV/K
expected_stddev_energy = sqrt(units.kB * T * T * CV) # eV
expected_stddev_temp = sqrt(units.kB * T * T / CV) # K
#ReportTest("Standard deviation of temperature", standardDeviation(temp),
# expected_stddev_energy / (3 * len(atoms) * units.kB), 0.05, True)
ReportTest("Standard deviation of temperature",
standardDeviation(temp[len(temp)/2:]),
expected_stddev_temp,
0.5*expected_stddev_temp)
ReportTest("Average pressure", mean(pres), p_goal, 0.01, True)
expected_stddev_pressure = sqrt(units.kB * T * bulk * units.GPa / mean(vol))
expected_stddev_volume = sqrt(units.kB * T * mean(vol) / (bulk * units.GPa))
ReportTest("Standard deviation of pressure",
standardDeviation(pres[len(pres)/2:]),
expected_stddev_pressure/units.GPa,
0.2*expected_stddev_pressure/units.GPa)
ReportTest("Standard deviation of volume",
standardDeviation(vol),
expected_stddev_volume,
0.2*expected_stddev_volume)
dyn = NPT(atoms, 5 * units.fs, T_goal*units.kB,
array([-2, -1, 0, 0, 0, 0])*p_goal*units.GPa,
25*units.fs, (75*units.fs)**2 * bulk)
out = open(out2, "w")
stress = []
for i in xrange(step2):
dyn.run(5)
T = atoms.get_kinetic_energy() / (1.5 * len(atoms) * units.kB)
s = atoms.get_stress() / units.GPa
out.write("%5.2f %7.5f %7.5f %7.5f %7.5f %7.5f %7.5f\n" %
((T, )+tuple(s)))
out.flush()
stress.append(s)
if i % 10 == 0:
print i,"/",step2,"(part 2)"
stress = array(stress[len(stress)/2:])
s0 = mean(stress[:,0])
s1 = mean(stress[:,1])
s2 = mean(stress[:,2])
ReportTest("Stress component xx", s0, -2*p_goal, 0.05*p_goal)
ReportTest("Stress component yy", s1, -p_goal, 0.05*p_goal)
ReportTest("Stress component zz", s2, 0.0, 0.05*p_goal)
ReportTest.Summary()
|
auag92/n2dm
|
Asap-3.8.4/Test/HooverNPT.py
|
Python
|
mit
| 5,068
|
[
"ASE"
] |
43b95e968307956230c4ee14fc14850427e3e8f0b5caa039519e50f1578d76f1
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import unittest as ut
import unittest_decorators as utx
import numpy as np
import math
import espressomd
import espressomd.math
import espressomd.interactions
import espressomd.shapes
import tests_common
@utx.skipIfMissingFeatures(["LENNARD_JONES_GENERIC"])
class ShapeBasedConstraintTest(ut.TestCase):
box_l = 30.
system = espressomd.System(box_l=3 * [box_l])
def tearDown(self):
self.system.part.clear()
self.system.constraints.clear()
def pos_on_surface(self, theta, v, semiaxis0, semiaxis1,
semiaxis2, center=np.array([15, 15, 15])):
"""Return position on ellipsoid surface."""
pos = np.array([semiaxis0 * np.sqrt(1. - v**2) * np.cos(theta),
semiaxis1 * np.sqrt(1. - v**2) * np.sin(theta),
semiaxis2 * v])
return pos + center
def test_hollow_conical_frustum(self):
"""
Test implementation of conical frustum shape.
"""
R1 = 5.0
R2 = 10.0
LENGTH = 15.0
D = 2.4
# test attributes
ctp = espressomd.math.CylindricalTransformationParameters(
center=3 * [5], axis=[1., 0., 0.])
shape = espressomd.shapes.HollowConicalFrustum(
cyl_transform_params=ctp,
r1=R1,
r2=R2,
thickness=D,
direction=-1,
length=LENGTH,
central_angle=np.pi)
np.testing.assert_almost_equal(
np.copy(shape.cyl_transform_params.center), 3 * [5])
self.assertAlmostEqual(shape.r1, R1)
self.assertAlmostEqual(shape.r2, R2)
self.assertAlmostEqual(shape.thickness, D)
self.assertAlmostEqual(shape.length, LENGTH)
self.assertEqual(shape.direction, -1)
self.assertAlmostEqual(shape.central_angle, np.pi)
# test points on and inside of the shape
ctp = espressomd.math.CylindricalTransformationParameters()
shape = espressomd.shapes.HollowConicalFrustum(
cyl_transform_params=ctp, r1=R1, r2=R2, thickness=0.0, length=LENGTH)
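        # z(y) maps a radius y on the frustum mantle linearly onto the axial
        # coordinate: z(r1) = l / 2 at one end and z(r2) = -l / 2 at the other.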
def z(y, r1, r2, l): return l / (r1 - r2) * \
y + l / 2. - l * r1 / (r1 - r2)
y_vals = np.linspace(R1, R2, 100)
for y in y_vals:
dist = shape.calc_distance(position=[0.0, y, z(y, R1, R2, LENGTH)])
self.assertAlmostEqual(dist[0], 0.0)
shape = espressomd.shapes.HollowConicalFrustum(
cyl_transform_params=ctp,
r1=R1,
r2=R2,
thickness=D,
length=LENGTH,
direction=-1)
for y in y_vals:
dist = shape.calc_distance(position=[0.0, y, z(y, R1, R2, LENGTH)])
self.assertAlmostEqual(dist[0], 0.5 * D)
shape = espressomd.shapes.HollowConicalFrustum(
cyl_transform_params=ctp, r1=R1, r2=R2, thickness=D, length=LENGTH)
for y in y_vals:
dist = shape.calc_distance(position=[0.0, y, z(y, R1, R2, LENGTH)])
self.assertAlmostEqual(dist[0], -0.5 * D)
# check sign of dist
shape = espressomd.shapes.HollowConicalFrustum(
cyl_transform_params=ctp, r1=R1, r2=R1, thickness=D, length=LENGTH)
self.assertLess(shape.calc_distance(
position=[0.0, R1, 0.25 * LENGTH])[0], 0.0)
self.assertLess(shape.calc_distance(
position=[0.0, R1 + (0.5 - sys.float_info.epsilon) * D, 0.25 * LENGTH])[0], 0.0)
self.assertGreater(shape.calc_distance(
position=[0.0, R1 + (0.5 + sys.float_info.epsilon) * D, 0.25 * LENGTH])[0], 0.0)
self.assertGreater(shape.calc_distance(
position=[0.0, R1 - (0.5 + sys.float_info.epsilon) * D, 0.25 * LENGTH])[0], 0.0)
shape = espressomd.shapes.HollowConicalFrustum(
cyl_transform_params=ctp,
r1=R1,
r2=R1,
thickness=D,
length=LENGTH,
direction=-1)
self.assertGreater(shape.calc_distance(
position=[0.0, R1, 0.25 * LENGTH])[0], 0.0)
self.assertGreater(shape.calc_distance(
position=[0.0, R1 + (0.5 - sys.float_info.epsilon) * D, 0.25 * LENGTH])[0], 0.0)
self.assertLess(shape.calc_distance(
position=[0.0, R1 + (0.5 + sys.float_info.epsilon) * D, 0.25 * LENGTH])[0], 0.0)
self.assertLess(shape.calc_distance(
position=[0.0, R1 - (0.5 + sys.float_info.epsilon) * D, 0.25 * LENGTH])[0], 0.0)
# test points outside of the shape
shape = espressomd.shapes.HollowConicalFrustum(
cyl_transform_params=ctp, r1=R1, r2=R2, thickness=D, length=LENGTH, direction=1)
dist = shape.calc_distance(position=[R1, 0, LENGTH / 2. + 5])
self.assertAlmostEqual(dist[0], 5 - D / 2.)
np.testing.assert_array_almost_equal(dist[1], [0, 0, dist[0]])
dist = shape.calc_distance(position=[0.1, 0, LENGTH / 2.])
self.assertAlmostEqual(dist[0], R1 - D / 2. - 0.1)
np.testing.assert_array_almost_equal(dist[1], [-dist[0], 0, 0])
# check rotated coordinates, central angle with straight frustum
CENTER = np.array(3 * [5])
CENTRAL_ANGLE = np.pi / 2
ctp = espressomd.math.CylindricalTransformationParameters(
center=CENTER, axis=[1., 0., 0.], orientation=[0., 0., 1.])
shape = espressomd.shapes.HollowConicalFrustum(
cyl_transform_params=ctp,
r1=R1,
r2=R1,
thickness=0.,
length=LENGTH,
central_angle=CENTRAL_ANGLE)
# point within length
probe_pos = CENTER + [0, 10 * sys.float_info.epsilon, 1.234]
closest_on_surface = CENTER + [0,
R1 * np.sin(CENTRAL_ANGLE / 2.),
R1 * np.cos(CENTRAL_ANGLE / 2.)]
dist = shape.calc_distance(position=probe_pos)
d_vec_expected = probe_pos - closest_on_surface
self.assertAlmostEqual(dist[0], np.linalg.norm(d_vec_expected))
np.testing.assert_array_almost_equal(d_vec_expected, np.copy(dist[1]))
# point outside of length
probe_pos = CENTER + [LENGTH, 10 * sys.float_info.epsilon, 1.234]
closest_on_surface = CENTER + [LENGTH / 2.,
R1 * np.sin(CENTRAL_ANGLE / 2.),
R1 * np.cos(CENTRAL_ANGLE / 2.)]
dist = shape.calc_distance(position=probe_pos)
d_vec_expected = probe_pos - closest_on_surface
self.assertAlmostEqual(dist[0], np.linalg.norm(d_vec_expected))
np.testing.assert_array_almost_equal(d_vec_expected, np.copy(dist[1]))
# check central angle with funnel-type frustum
ctp = espressomd.math.CylindricalTransformationParameters(
center=[LENGTH / 2., 0, 0], axis=[1., 0., 0.], orientation=[0., 0., 1.])
shape = espressomd.shapes.HollowConicalFrustum(
cyl_transform_params=ctp,
r1=LENGTH,
r2=0,
thickness=0.,
length=LENGTH,
central_angle=np.pi)
# with this setup, the edges coincide with the xy angle bisectors
# point inside LENGTH
probe_pos = [LENGTH / 2., LENGTH / 2., 5]
d_vec_expected = np.array([0, 0, 5])
dist = shape.calc_distance(position=probe_pos)
self.assertAlmostEqual(dist[0], np.linalg.norm(d_vec_expected))
np.testing.assert_array_almost_equal(d_vec_expected, np.copy(dist[1]))
# point outside LENGTH
probe_pos = [2 * LENGTH, 5 * LENGTH, 5]
frustum_end = np.array([LENGTH, LENGTH, 0])
d_vec_expected = probe_pos - frustum_end
dist = shape.calc_distance(position=probe_pos)
self.assertAlmostEqual(dist[0], np.linalg.norm(d_vec_expected))
np.testing.assert_array_almost_equal(d_vec_expected, np.copy(dist[1]))
def test_simplepore(self):
"""
Test implementation of simplepore shape.
"""
RADIUS = 12.5
LENGTH = 15.0
CENTER = 3 * [self.box_l / 2]
AXIS = [1, 0, 0]
SRADIUS = 2
shape = espressomd.shapes.SimplePore(
center=CENTER, axis=AXIS, length=LENGTH, radius=RADIUS,
smoothing_radius=SRADIUS)
# check distances inside cylinder
for x in np.linspace(self.box_l / 2 - LENGTH / 2 + SRADIUS,
self.box_l / 2 + LENGTH / 2 - SRADIUS, 10):
for y in np.linspace(0, RADIUS, 5):
dist = shape.calc_distance(
position=[x, self.box_l / 2 + y, self.box_l / 2])
self.assertAlmostEqual(dist[0], RADIUS - y)
# check distances near the walls
for y in np.linspace(0, self.box_l / 2 - RADIUS - SRADIUS, 6):
for z in np.linspace(0, self.box_l / 2 - RADIUS - SRADIUS, 6):
for x in np.linspace(0, self.box_l / 2 - LENGTH / 2, 6):
dist_to_x = (self.box_l / 2 - LENGTH / 2 - x)
dist = shape.calc_distance(
position=[x, y, self.box_l - z])
np.testing.assert_almost_equal(
np.copy(dist[1]), [-dist_to_x, 0, 0])
dist = shape.calc_distance(
position=[self.box_l - x, self.box_l - y, z])
np.testing.assert_almost_equal(
np.copy(dist[1]), [dist_to_x, 0, 0])
# check getters
self.assertAlmostEqual(shape.radius, RADIUS)
self.assertAlmostEqual(shape.length, LENGTH)
self.assertAlmostEqual(shape.smoothing_radius, SRADIUS)
np.testing.assert_almost_equal(np.copy(shape.axis), AXIS)
np.testing.assert_almost_equal(np.copy(shape.center), CENTER)
def test_sphere(self):
"""Checks geometry of an inverted sphere
"""
rad = self.box_l / 2.0
sphere_shape = espressomd.shapes.Sphere(
center=3 * [rad],
radius=rad,
direction=-1)
phi_steps = 11
theta_steps = 11
for distance in {-1.2, 2.6}:
for phi in range(phi_steps):
phi_angle = phi / phi_steps * 2.0 * math.pi
for theta in range(theta_steps):
theta_angle = theta / theta_steps * math.pi
pos = np.array(
[math.cos(phi_angle) * math.sin(theta_angle)
* (rad + distance),
math.sin(phi_angle) * math.sin(theta_angle)
* (rad + distance),
math.cos(theta_angle) * (rad + distance)]) + rad
shape_dist, _ = sphere_shape.calc_distance(
position=pos.tolist())
self.assertAlmostEqual(shape_dist, -distance)
def test_ellipsoid(self):
"""Checks that distance of particles on the ellipsoid constraint's surface is zero.
For the case of a spherical ellipsoid, also several non-zero distances are tested.
"""
system = self.system
system.time_step = 0.01
system.cell_system.skin = 0.4
p = system.part.add(pos=[0., 0., 0.], type=0)
# abuse generic LJ to measure distance via the potential V(r) = r
system.non_bonded_inter[0, 1].generic_lennard_jones.set_params(
epsilon=1., sigma=1., cutoff=7., shift=0., offset=0., e1=-1, e2=0, b1=1., b2=0.)
N = 10
# check oblate ellipsoid
semiaxes = [2.18, 5.45]
e = espressomd.shapes.Ellipsoid(
a=semiaxes[0],
b=semiaxes[1],
center=3 * [self.box_l / 2.],
direction=+1)
constraint_e = espressomd.constraints.ShapeBasedConstraint(
shape=e, particle_type=1, penetrable=True)
const1 = system.constraints.add(constraint_e)
for i in range(N):
for j in range(N):
theta = 2. * i / float(N) * np.pi
v = j / float(N - 1) * 2. - 1
pos = self.pos_on_surface(
theta, v, semiaxes[0], semiaxes[1], semiaxes[1])
p.pos = pos
system.integrator.run(recalc_forces=True, steps=0)
energy = system.analysis.energy()
self.assertAlmostEqual(energy["total"], 0., places=6)
system.constraints.remove(const1)
# check prolate ellipsoid
semiaxes = [3.61, 2.23]
e = espressomd.shapes.Ellipsoid(
a=semiaxes[0],
b=semiaxes[1],
center=3 * [self.box_l / 2.],
direction=+1)
constraint_e = espressomd.constraints.ShapeBasedConstraint(
shape=e, particle_type=1, penetrable=True)
const1 = system.constraints.add(constraint_e)
for i in range(N):
for j in range(N):
theta = 2. * i / float(N) * np.pi
v = j / float(N - 1) * 2. - 1
pos = self.pos_on_surface(
theta, v, semiaxes[0], semiaxes[1], semiaxes[1])
p.pos = pos
system.integrator.run(recalc_forces=True, steps=0)
energy = system.analysis.energy()
self.assertAlmostEqual(energy["total"], 0., places=6)
# check sphere (multiple distances from surface)
# change ellipsoid parameters instead of creating a new constraint
e.a = 1.
e.b = 1.
self.assertAlmostEqual(e.a, 1.)
self.assertAlmostEqual(e.b, 1.)
radii = np.linspace(1., 6.5, 7)
for i in range(N):
for j in range(N):
theta = 2. * i / float(N) * np.pi
v = j / float(N - 1) * 2. - 1
for r in radii:
pos = self.pos_on_surface(theta, v, r, r, r)
p.pos = pos
system.integrator.run(recalc_forces=True, steps=0)
energy = system.analysis.energy()
self.assertAlmostEqual(energy["total"], r - 1.)
# Reset the interaction to zero
system.non_bonded_inter[0, 1].generic_lennard_jones.set_params(
epsilon=0., sigma=0., cutoff=0., shift=0., offset=0., e1=0, e2=0, b1=0., b2=0.)
def test_cylinder(self):
"""Tests if shape based constraints can be added to a system both by
(1) defining a constraint object which is then added
        (2) via keyword arguments.
Checks that cylinder constraints with LJ interactions exert forces
on a test particle (that is, the constraints do what they should).
"""
system = self.system
system.time_step = 0.01
system.cell_system.skin = 0.4
rad = self.box_l / 2.0
length = self.box_l / 2.0
system.part.add(pos=[rad, 1.02, rad], type=0)
# check force calculation of a cylinder without top and bottom
interaction_dir = -1 # constraint is directed inwards
cylinder_shape = espressomd.shapes.Cylinder(
center=3 * [rad],
axis=[0, 0, 1],
direction=interaction_dir,
radius=rad,
length=self.box_l + 5) # +5 in order to have no top or bottom
penetrability = False # impenetrable
outer_cylinder_constraint = espressomd.constraints.ShapeBasedConstraint(
shape=cylinder_shape, particle_type=1, penetrable=penetrability)
outer_cylinder_wall = system.constraints.add(outer_cylinder_constraint)
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)
system.integrator.run(0) # update forces
self.assertAlmostEqual(outer_cylinder_constraint.min_dist(), 1.02)
# test summed forces on cylinder wall
self.assertAlmostEqual(
-1.0 * outer_cylinder_wall.total_force()[1],
tests_common.lj_force(
espressomd,
cutoff=2.0,
offset=0.,
eps=1.0,
sig=1.0,
r=1.02),
places=10) # minus for Newton's third law
# check whether total_summed_outer_normal_force is correct
y_part2 = self.box_l - 1.02
system.part.add(pos=[rad, y_part2, rad], type=0)
system.integrator.run(0)
dist_part2 = self.box_l - y_part2
self.assertAlmostEqual(outer_cylinder_wall.total_force()[2], 0.0)
self.assertAlmostEqual(
outer_cylinder_wall.total_normal_force(),
2 *
tests_common.lj_force(
espressomd,
cutoff=2.0,
offset=0.,
eps=1.0,
sig=1.0,
r=dist_part2))
# Test the geometry of a cylinder with top and bottom
cylinder_shape_finite = espressomd.shapes.Cylinder(
center=3 * [rad],
axis=[0, 0, 1],
direction=1,
radius=rad,
length=length)
phi_steps = 11
for distance in {-3.6, 2.8}:
for z in range(int(self.box_l)):
center = np.array([rad, rad, z])
start_point = np.array([rad, 2 * rad - distance, z])
for phi in range(phi_steps):
# Rotation around the axis of the cylinder
phi_angle = phi / phi_steps * 2.0 * math.pi
phi_rot_matrix = np.array(
[[math.cos(phi_angle), -math.sin(phi_angle), 0.0],
[math.sin(phi_angle), math.cos(phi_angle), 0.0],
[0.0, 0.0, 1.0]])
phi_rot_point = np.dot(
phi_rot_matrix, start_point - center) + center
shape_dist, _ = cylinder_shape_finite.calc_distance(
position=phi_rot_point.tolist())
dist = -distance
if distance > 0.0:
if z < (self.box_l - length) / 2.0 + distance:
dist = (self.box_l - length) / 2.0 - z
elif z > (self.box_l + length) / 2.0 - distance:
dist = z - (self.box_l + length) / 2.0
else:
dist = -distance
else:
if z < (self.box_l - length) / 2.0:
z_dist = (self.box_l - length) / 2.0 - z
dist = math.sqrt(z_dist**2 + distance**2)
elif z > (self.box_l + length) / 2.0:
z_dist = z - (self.box_l + length) / 2.0
dist = math.sqrt(z_dist**2 + distance**2)
else:
dist = -distance
self.assertAlmostEqual(shape_dist, dist)
# check getters
self.assertAlmostEqual(cylinder_shape_finite.radius, rad)
self.assertAlmostEqual(cylinder_shape_finite.length, length)
np.testing.assert_almost_equal(
np.copy(cylinder_shape_finite.axis), [0, 0, 1])
np.testing.assert_almost_equal(
np.copy(cylinder_shape_finite.center), 3 * [rad])
self.assertFalse(cylinder_shape_finite.open)
cylinder_shape_finite.open = True
self.assertTrue(cylinder_shape_finite.open)
# Reset
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0)
def test_spherocylinder(self):
"""Checks that spherocylinder constraints with LJ interactions exert
forces on a test particle (that is, the constraints do what they should)
using geometrical parameters of (1) an infinite cylinder and (2) a
finite spherocylinder.
"""
system = self.system
system.time_step = 0.01
system.cell_system.skin = 0.4
system.part.add(pos=[self.box_l / 2.0, 1.02, self.box_l / 2.0], type=0)
# check force calculation of spherocylinder constraint
# (1) infinite cylinder
interaction_dir = -1 # constraint is directed inwards
spherocylinder_shape = espressomd.shapes.SpheroCylinder(
center=3 * [self.box_l / 2.0],
axis=[0, 0, 1],
direction=interaction_dir,
radius=self.box_l / 2.0,
length=self.box_l + 5) # +5 in order to have no top or bottom
penetrability = False # impenetrable
outer_cylinder_constraint = espressomd.constraints.ShapeBasedConstraint(
shape=spherocylinder_shape, particle_type=1, penetrable=penetrability)
system.constraints.add(outer_cylinder_constraint)
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)
system.integrator.run(0) # update forces
self.assertAlmostEqual(outer_cylinder_constraint.min_dist(), 1.02)
# test summed forces on cylinder wall
self.assertAlmostEqual(
-1.0 * outer_cylinder_constraint.total_force()[1],
tests_common.lj_force(
espressomd,
cutoff=2.0,
offset=0.,
eps=1.0,
sig=1.0,
r=1.02),
places=10) # minus for Newton's third law
# check whether total_summed_outer_normal_force is correct
y_part2 = self.box_l - 1.02
system.part.add(
pos=[self.box_l / 2.0, y_part2, self.box_l / 2.0], type=0)
system.integrator.run(0)
dist_part2 = self.box_l - y_part2
self.assertAlmostEqual(outer_cylinder_constraint.total_force()[2], 0.0)
self.assertAlmostEqual(outer_cylinder_constraint.total_normal_force(),
2 * tests_common.lj_force(
espressomd, cutoff=2.0, offset=0.,
eps=1.0, sig=1.0, r=dist_part2))
# Reset
system.part.clear()
system.constraints.clear()
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0)
# (2) finite spherocylinder
system.part.clear()
interaction_dir = -1 # constraint is directed inwards
spherocylinder_shape = espressomd.shapes.SpheroCylinder(
center=3 * [self.box_l / 2.0],
axis=[0, 1, 0],
direction=interaction_dir,
radius=10.0,
length=6.0)
penetrability = True # penetrable
inner_cylinder_constraint = espressomd.constraints.ShapeBasedConstraint(
shape=spherocylinder_shape, particle_type=1, penetrable=penetrability)
system.constraints.add(inner_cylinder_constraint)
# V(r) = r
system.non_bonded_inter[0, 1].generic_lennard_jones.set_params(
epsilon=1., sigma=1., cutoff=10., shift=0., offset=0., e1=-1, e2=0, b1=1., b2=0.)
# check hemispherical caps (multiple distances from surface)
N = 10
radii = np.linspace(1., 12., 12)
p = system.part.add(pos=[0., 0., 0.], type=0)
for i in range(6):
for j in range(N):
theta = 2. * i / float(N) * np.pi
v = j / float(N - 1) * 2. - 1
for end, r in enumerate(radii):
pos = self.pos_on_surface(theta, v, r, r, r) + [0, 3, 0]
if end % 2 == 0:
# flip to the other end of the cylinder
pos[1] = self.box_l - pos[1]
p.pos = pos
system.integrator.run(recalc_forces=True, steps=0)
energy = system.analysis.energy()
self.assertAlmostEqual(energy["total"], np.abs(10. - r))
# check cylinder
for i in range(N):
theta = 2. * i / float(N) * np.pi
for r in radii:
pos = r * np.array([np.cos(theta), 0, np.sin(theta)])
system.part[0].pos = pos + self.box_l / 2.0
system.integrator.run(recalc_forces=True, steps=0)
energy = system.analysis.energy()
self.assertAlmostEqual(energy["total"], np.abs(10. - r))
# check getters
self.assertAlmostEqual(spherocylinder_shape.radius, 10.)
self.assertAlmostEqual(spherocylinder_shape.length, 6.0)
np.testing.assert_almost_equal(
np.copy(spherocylinder_shape.axis), [0, 1, 0])
np.testing.assert_almost_equal(
np.copy(spherocylinder_shape.center), 3 * [self.box_l / 2.0])
# Reset
system.non_bonded_inter[0, 1].generic_lennard_jones.set_params(
epsilon=0., sigma=0., cutoff=0., shift=0., offset=0., e1=0, e2=0, b1=0., b2=0.)
def test_wall_forces(self):
"""Tests if shape based constraints can be added to a system both by
(1) defining a constraint object which is then added
(2) and via keyword arguments.
Checks that wall constraints with LJ interactions exert forces
on a test particle (that is, the constraints do what they should).
"""
system = self.system
system.time_step = 0.01
p = system.part.add(pos=[5., 1.21, 0.83], type=0)
# Check forces are initialized to zero
np.testing.assert_array_equal(np.copy(p.f), [0., 0., 0.])
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)
system.non_bonded_inter[0, 2].lennard_jones.set_params(
epsilon=1.5, sigma=1.0, cutoff=2.0, shift=0)
shape_xz = espressomd.shapes.Wall(normal=[0., 1., 0.], dist=0.)
shape_xy = espressomd.shapes.Wall(normal=[0., 0., 1.], dist=0.)
# (1)
constraint_xz = espressomd.constraints.ShapeBasedConstraint(
shape=shape_xz, particle_type=1)
wall_xz = system.constraints.add(constraint_xz)
# (2)
wall_xy = system.constraints.add(shape=shape_xy, particle_type=2)
system.integrator.run(0) # update forces
self.assertEqual(p.f[0], 0.)
self.assertAlmostEqual(
p.f[1],
tests_common.lj_force(
espressomd,
cutoff=2.0,
offset=0.,
eps=1.0,
sig=1.0,
r=1.21),
places=10)
self.assertAlmostEqual(
p.f[2],
tests_common.lj_force(
espressomd,
cutoff=2.0,
offset=0.,
eps=1.5,
sig=1.0,
r=0.83),
places=10)
# test summed forces on walls
self.assertAlmostEqual(
-1.0 * wall_xz.total_force()[1],
tests_common.lj_force(
espressomd,
cutoff=2.0,
offset=0.,
eps=1.0,
sig=1.0,
r=1.21),
places=10) # minus for Newton's third law
self.assertAlmostEqual(
-1.0 * wall_xy.total_force()[2],
tests_common.lj_force(
espressomd,
cutoff=2.0,
offset=0.,
eps=1.5,
sig=1.0,
r=0.83),
places=10)
# check whether total_normal_force is correct
self.assertAlmostEqual(
wall_xy.total_normal_force(),
tests_common.lj_force(
espressomd,
cutoff=2.0,
offset=0.,
eps=1.5,
sig=1.0,
r=0.83),
places=10)
# this one is closer and should get the mindist()
p1 = system.part.add(pos=[5., 1.20, 0.82], type=0)
self.assertAlmostEqual(constraint_xz.min_dist(), p1.pos[1])
self.assertAlmostEqual(wall_xz.min_dist(), p1.pos[1])
self.assertAlmostEqual(wall_xy.min_dist(), p1.pos[2])
# Reset
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0)
system.non_bonded_inter[0, 2].lennard_jones.set_params(
epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0)
def test_slitpore(self):
"""Checks that slitpore constraints with LJ interactions exert forces
on a test particle (that is, the constraints do what they should).
"""
system = self.system
system.time_step = 0.01
system.cell_system.skin = 0.4
# check force calculation of slitpore constraint
slitpore_shape = espressomd.shapes.Slitpore(
channel_width=5,
lower_smoothing_radius=2,
upper_smoothing_radius=3,
pore_length=15,
pore_mouth=20,
pore_width=10,
dividing_plane=self.box_l / 2)
slitpore_constraint = espressomd.constraints.ShapeBasedConstraint(
shape=slitpore_shape, particle_type=1, penetrable=True)
system.constraints.add(slitpore_constraint)
# V(r) = r
system.non_bonded_inter[0, 1].generic_lennard_jones.set_params(
epsilon=1., sigma=1., cutoff=10., shift=0., offset=0., e1=-1, e2=0, b1=1., b2=0.)
p = system.part.add(pos=[0., 0., 0.], type=0)
x = self.box_l / 2.0
d = 1 - np.sqrt(2) / 2
parameters = [
([x, x, 1.], -4., [0., 0., -1.]), # outside channel
([x, x, 15.], 5., [-1., 0., 0.]), # inside channel
([x, x, 5.], 0., [0., 0., 0.]), # on channel bottom surface
([x - 5., x, 15.], 0., [0., 0., 0.]), # on channel side surface
([x + 5., x, 15.], 0., [0., 0., 0.]), # on channel side surface
([x - 5. + 2 * d, x, 5. + 2 * d], 0., [0., 0., 0.]), # lower circle
([x + 5. - 2 * d, x, 5. + 2 * d], 0., [0., 0., 0.]), # lower circle
([x - 5. - 3 * d, x, 20. - 3 * d], 0., [0., 0., 0.]), # upper circle
([x + 5. + 3 * d, x, 20. - 3 * d], 0., [0., 0., 0.]), # upper circle
([1., x, 20.], 0., [0., 0., 0.]), # on inner wall surface
([x, x, 25.], 0., [0., 0., 0.]), # on outer wall surface
([x, x, 27.], -2., [0., 0., 1.]), # outside wall
]
for pos, ref_mindist, ref_force in parameters:
p.pos = pos
system.integrator.run(recalc_forces=True, steps=0)
obs_mindist = slitpore_constraint.min_dist()
self.assertAlmostEqual(obs_mindist, ref_mindist, places=10)
if (ref_mindist == 0. and obs_mindist != 0.):
# force direction on a circle is not well-defined due to
# numerical instability
continue
np.testing.assert_almost_equal(
np.copy(slitpore_constraint.total_force()), ref_force, 10)
# Reset
system.non_bonded_inter[0, 1].generic_lennard_jones.set_params(
epsilon=0., sigma=0., cutoff=0., shift=0., offset=0., e1=0, e2=0, b1=0., b2=0.)
def test_rhomboid(self):
"""Checks that rhomboid constraints with LJ interactions exert forces
on a test particle (that is, the constraints do what they should)
using the geometrical parameters of (1) a cuboid and (2) a rhomboid.
"""
system = self.system
system.time_step = 0.01
system.cell_system.skin = 0.4
# check force calculation of rhomboid constraint
# (1) using a cuboid
interaction_dir = +1 # constraint is directed outwards
length = np.array([-5.0, 6.0, 7.0]) # dimension of the cuboid
corner = np.array(3 * [self.box_l / 2.0])
rhomboid_shape = espressomd.shapes.Rhomboid(
corner=corner,
a=[length[0], 0.0, 0.0], # cube
b=[0.0, length[1], 0.0],
c=[0.0, 0.0, length[2]],
direction=interaction_dir
)
penetrability = False # impenetrable
rhomboid_constraint = espressomd.constraints.ShapeBasedConstraint(
shape=rhomboid_shape, particle_type=1, penetrable=penetrability)
rhomboid_constraint = system.constraints.add(rhomboid_constraint)
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)
p = system.part.add(pos=[self.box_l / 2.0 + length[0] / 2.0,
self.box_l / 2.0 + length[1] / 2.0,
self.box_l / 2.0 - 1], type=0)
system.integrator.run(0) # update forces
self.assertEqual(rhomboid_constraint.min_dist(), 1.)
self.assertEqual(p.f[0], 0.)
self.assertEqual(p.f[1], 0.)
self.assertAlmostEqual(
-p.f[2],
tests_common.lj_force(
espressomd,
cutoff=2.,
offset=0.,
eps=1.,
sig=1.,
r=1.),
places=10)
self.assertAlmostEqual(
rhomboid_constraint.total_normal_force(),
tests_common.lj_force(
espressomd,
cutoff=2.,
offset=0.,
eps=1.,
sig=1.,
r=1.),
places=10)
x_range = 12
y_range = 12
z_range = 12
for x in range(x_range):
for y in range(y_range):
for z in range(z_range):
pos = np.array(
[x + (self.box_l + length[0] - x_range) / 2.0,
y + (self.box_l + length[1] - y_range) / 2.0,
z + (self.box_l + length[2] - z_range) / 2.0])
shape_dist, shape_dist_vec = rhomboid_shape.calc_distance(
position=pos.tolist())
outside = False
edge_case = False
dist_vec = np.array([0.0, 0.0, 0.0])
# check if outside or inside
if(pos[0] < (self.box_l + length[0] - abs(length[0])) / 2.0 or
pos[0] > (self.box_l + length[0] + abs(length[0])) / 2.0 or
pos[1] < (self.box_l + length[1] - abs(length[1])) / 2.0 or
pos[1] > (self.box_l + length[1] + abs(length[1])) / 2.0 or
pos[2] < (self.box_l + length[2] - abs(length[2])) / 2.0 or
pos[2] > (self.box_l + length[2] + abs(length[2])) / 2.0):
outside = True
if outside:
for i in range(3):
if pos[i] < (self.box_l + length[i] -
abs(length[i])) / 2.0:
dist_vec[i] = pos[i] - (
self.box_l + length[i] - abs(length[i])) / 2.0
elif pos[i] > (self.box_l + length[i] + abs(length[i])) / 2.0:
dist_vec[i] = pos[i] - (
self.box_l + length[i] + abs(length[i])) / 2.0
else:
dist_vec[i] = 0.0
dist = np.linalg.norm(dist_vec)
else:
dist = self.box_l
c1 = pos - corner
c2 = corner + length - pos
abs_c1c2 = np.abs(np.concatenate((c1, c2)))
dist = np.amin(abs_c1c2)
where = np.argwhere(dist == abs_c1c2)
if len(where) > 1:
edge_case = True
for which in where:
if which < 3:
dist_vec[which] = dist * np.sign(c1[which])
else:
dist_vec[which - 3] = -dist * \
np.sign(c2[which - 3])
dist *= -interaction_dir
if edge_case:
for i in range(3):
if shape_dist_vec[i] != 0.0:
self.assertAlmostEqual(
abs(shape_dist_vec[i]), abs(dist_vec[i]))
else:
self.assertAlmostEqual(shape_dist_vec[0], dist_vec[0])
self.assertAlmostEqual(shape_dist_vec[1], dist_vec[1])
self.assertAlmostEqual(shape_dist_vec[2], dist_vec[2])
self.assertAlmostEqual(shape_dist, dist)
# (2) using a rhomboid
rhomboid_shape.a = [5., 5., 0.] # rhomboid
rhomboid_shape.b = [0., 0., 5.]
rhomboid_shape.c = [0., 5., 0.]
p.pos = [
self.box_l / 2 + 2.5,
self.box_l / 2 + 2.5,
self.box_l / 2 - 1]
system.integrator.run(0) # update forces
self.assertEqual(rhomboid_constraint.min_dist(), 1.)
self.assertAlmostEqual(
rhomboid_constraint.total_normal_force(),
tests_common.lj_force(
espressomd,
cutoff=2.,
offset=0.,
eps=1.,
sig=1.,
r=1.),
places=10)
p.pos = p.pos - [0., 1., 0.]
system.integrator.run(0) # update forces
self.assertAlmostEqual(
rhomboid_constraint.min_dist(), 1.2247448714, 10)
self.assertAlmostEqual(
rhomboid_constraint.total_normal_force(),
tests_common.lj_force(
espressomd,
cutoff=2.,
offset=0.,
eps=1.,
sig=1.,
r=1.2247448714),
places=10)
# Reset
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0)
def test_torus(self):
"""Checks that torus constraints with LJ interactions exert forces
on a test particle (that is, the constraints do what they should).
"""
system = self.system
system.time_step = 0.01
system.cell_system.skin = 0.4
interaction_dir = 1 # constraint is directed inwards
radius = self.box_l / 4.0
tube_radius = self.box_l / 6.0
part_offset = 1.2
system.part.add(
pos=[self.box_l / 2.0, self.box_l / 2.0 + part_offset, self.box_l / 2.0], type=0)
# check force calculation of cylinder constraint
torus_shape = espressomd.shapes.Torus(
center=3 * [self.box_l / 2.0],
normal=[0, 0, 1],
direction=interaction_dir,
radius=radius,
tube_radius=tube_radius)
penetrability = False # impenetrable
torus_constraint = espressomd.constraints.ShapeBasedConstraint(
shape=torus_shape, particle_type=1, penetrable=penetrability)
torus_wall = system.constraints.add(torus_constraint)
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)
system.integrator.run(0) # update forces
self.assertAlmostEqual(torus_constraint.min_dist(),
radius - tube_radius - part_offset)
# test summed forces on torus wall
self.assertAlmostEqual(
torus_wall.total_force()[1],
tests_common.lj_force(
espressomd,
cutoff=2.0,
offset=0.,
eps=1.0,
sig=1.0,
r=torus_constraint.min_dist()),
places=10)
# check whether total_summed_outer_normal_force is correct
y_part2 = self.box_l / 2.0 + 2.0 * radius - part_offset
system.part.add(
pos=[self.box_l / 2.0, y_part2, self.box_l / 2.0], type=0)
system.integrator.run(0)
self.assertAlmostEqual(torus_wall.total_force()[1], 0.0)
self.assertAlmostEqual(torus_wall.total_normal_force(), 2 * tests_common.lj_force(
espressomd, cutoff=2.0, offset=0., eps=1.0, sig=1.0,
r=radius - tube_radius - part_offset))
# Test the geometry of the shape directly
phi_steps = 11
theta_steps = 11
center = np.array([self.box_l / 2.0,
self.box_l / 2.0,
self.box_l / 2.0])
tube_center = np.array([self.box_l / 2.0,
self.box_l / 2.0 + radius,
self.box_l / 2.0])
for distance in {1.02, -0.7}:
start_point = np.array([self.box_l / 2.0,
self.box_l / 2.0 + radius -
tube_radius - distance,
self.box_l / 2.0])
for phi in range(phi_steps):
for theta in range(theta_steps):
# Rotation around the tube
theta_angle = theta / theta_steps * 2.0 * math.pi
theta_rot_matrix = np.array(
[[1.0, 0.0, 0.0],
[0.0, math.cos(theta_angle), -math.sin(theta_angle)],
[0.0, math.sin(theta_angle), math.cos(theta_angle)]])
theta_rot_point = np.dot(
theta_rot_matrix,
start_point - tube_center)
theta_rot_point += tube_center
# Rotation around the center of the torus
phi_angle = phi / phi_steps * 2.0 * math.pi
phi_rot_matrix = np.array(
[[math.cos(phi_angle), -math.sin(phi_angle), 0.0],
[math.sin(phi_angle), math.cos(phi_angle), 0.0],
[0.0, 0.0, 1.0]])
phi_rot_point = np.dot(
phi_rot_matrix,
theta_rot_point - center) + center
shape_dist, _ = torus_shape.calc_distance(
position=phi_rot_point.tolist())
self.assertAlmostEqual(shape_dist, distance)
# check getters
self.assertAlmostEqual(torus_shape.radius, radius)
self.assertAlmostEqual(torus_shape.tube_radius, tube_radius)
np.testing.assert_almost_equal(np.copy(torus_shape.normal), [0, 0, 1])
np.testing.assert_almost_equal(
np.copy(torus_shape.center), 3 * [self.box_l / 2.0])
# Reset
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0)
if __name__ == "__main__":
ut.main()
|
fweik/espresso
|
testsuite/python/constraint_shape_based.py
|
Python
|
gpl-3.0
| 43,664
|
[
"ESPResSo"
] |
cd8caf14bd8b486e2990c64daa15e42b164ef97799549d852aa2532e8885b802
|
# coding: utf-8
from __future__ import print_function#, unicode_literals
from six import iteritems
#from struct import pack
from pyNastran.bdf.bdf import BDF
from pyNastran.op2.op2 import OP2
from numpy import zeros, searchsorted, arange
#def pack_nodes(fmt, data):
#return ''
def pack_int_array(fmt, data):
return ' '.join([str(val) for val in data]) + '\n'
def pack_float_1d_array(fmt, data):
return ' '.join([str(val) for val in data.ravel()]) + '\n'
def pack_float_3d_array(fmt, data):
msg = ''
for datai in data[0, :, :]:
msgi = ''
for dataii in datai:
msgi += '%s ' % dataii
msg += msgi[:-1] + '\n'
return msg #+ '\n\n'
def pack_float_2d_array(fmt, data):
msg = ''
for datai in data:
msgi = ''
for dataii in datai:
msgi += '%s ' % dataii
msg += msgi[:-1] + '\n'
return msg #+ '\n'
#def pack(fmt, data):
# return ''
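# Illustrative sketch (added for documentation; not part of the original module): the pack_*
# helpers above emit plain ASCII rows for the legacy VTK writer, e.g. (values assumed):
#   >>> pack_int_array('%3i', [1, 2, 3])
#   '1 2 3\n'
#   >>> pack_float_2d_array('%6f', zeros((2, 3)))
#   '0.0 0.0 0.0\n0.0 0.0 0.0\n'
# Note the fmt argument is currently unused; formatting falls back to str() on each value.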
def export_to_vtk(model):
bdf_filename = model + '.bdf'
op2_filename = model + '.op2'
vtk_filename = model + '.vtk'
export_to_vtk_filename(bdf_filename, op2_filename, vtk_filename)
print('finished exporting %s' % vtk_filename)
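# Illustrative note (added): export_to_vtk('plate') would therefore read plate.bdf and plate.op2
# and write plate.vtk next to them ('plate' is a hypothetical model name; main() below uses
# 'solid_bending').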
def export_to_vtk_filename(bdf_filename, op2_filename, vtk_filename, debug=False):
with open(vtk_filename, 'w') as vtk_file:
vtk_file.write('# vtk DataFile Version 3.1\n')
vtk_file.write('created by pyNastran\n')
#vtk_file.write('BINARY\n')
vtk_file.write('ASCII\n')
vtk_file.write('DATASET UNSTRUCTURED_GRID\n')
etype_map = {
# line
'CDAMP1' : 3,
'CDAMP2' : 3,
'CDAMP3' : 3,
'CDAMP4' : 3,
'CELAS1' : 3,
'CELAS2' : 3,
'CELAS3' : 3,
'CELAS4' : 3,
'CBAR' : 3,
'CBEAM' : 3,
'CROD' : 3,
'CONROD' : 3,
'CTUBE' : 3,
'CTRIA3' : 5, # triangle
'CQUAD4' : 9, # quad
'CSHEAR' : 9, # quad
# quadratic
'CTRIA6' : 22, # quadratic triangle
#'CQUAD8' : 23/28/30,
'CTETRA' : 10,
'CPENTA' : 13, # wedge
'CPYRAM' : 14,
'CHEXA' : 12, # hex
# quadratic solids
#'CTETRA' : 64,
#'CPENTA' : 65, # wedge
#'CPYRAM' : 66,
#'CHEXA' : 67, # hex
}
bdf = BDF(debug=debug)
bdf.read_bdf(bdf_filename)
op2 = OP2(debug=debug)
op2.read_op2(op2_filename)
out = bdf.get_card_ids_by_card_types()
#print('cards = [', ', '.join(sorted(out.keys())), ']')
grids = sorted(out['GRID'])
spoint = sorted(out['SPOINT'])
epoint = sorted(out['EPOINT'])
ngrid = len(grids)
nspoint = len(spoint)
nepoint = len(epoint)
nnodes = ngrid + nspoint + nepoint
ncrod = len(out['CROD'])
nconrod = len(out['CONROD'])
nctube = len(out['CTUBE'])
ncbeam = len(out['CBEAM'])
ncbar = len(out['CBAR'])
nline = ncrod + nconrod + nctube + ncbeam + ncbar
ncelas1 = len(out['CELAS1'])
ncelas2 = len(out['CELAS2'])
ncelas3 = len(out['CELAS3'])
ncelas4 = len(out['CELAS4'])
ncdamp1 = len(out['CDAMP1'])
ncdamp2 = len(out['CDAMP2'])
ncdamp3 = len(out['CDAMP3'])
ncdamp4 = len(out['CDAMP4'])
n0d = (ncelas1 + ncelas2 + ncelas3 + ncelas4 +
ncdamp1 + ncdamp2 + ncdamp3 + ncdamp4)
nctria3 = len(out['CTRIA3'])
ncquad4 = len(out['CQUAD4'])
nctria6 = len(out['CTRIA6'])
ncquad8 = len(out['CQUAD8'])
ncshear = len(out['CSHEAR'])
nshell = nctria3 + ncquad4 + nctria6 + ncquad8 + ncshear
nctetra4 = len(out['CTETRA'])
ncpyram5 = len(out['CPYRAM'])
ncpenta6 = len(out['CPENTA'])
nchexa8 = len(out['CHEXA'])
nctetra10 = 0
ncpyram8 = 0
ncpenta15 = 0
nchexa20 = 0
nsolid = (nctetra4 + ncpyram5 + ncpenta6 + nchexa8 +
nctetra10 + ncpyram8 + ncpenta15 + nchexa20)
#nelements = n0d + nline + nshell + nsolid
nelements = 0
etypes = [
'CELAS1', 'CELAS2', 'CELAS3', 'CELAS4',
'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4',
'CROD', 'CONROD', 'CTUBE',
'CBAR', 'CBEAM',
'CFAST', 'CBUSH', 'CBUSH1D', 'CBUSH2D',
'CTRIA3', 'CQUAD4', 'CTRIA6', 'CQUAD8', 'CSHEAR',
'CTETRA', 'CPENTA', 'CPYRAM', 'CHEXA',
]
assert len(etypes) == len(set(etypes)), 'there are duplicate etypes'
for etype in etypes:
if etype in out:
ne = len(out[etype])
nelements += ne
nproperties = nelements
bdf_nelements = bdf.nelements
# SPOINT & EPOINT are implicitly defined
xyz_cid0 = zeros((nnodes, 3), dtype='float32')
nids = zeros(nnodes, dtype='float32')
for i, nid in enumerate(grids):
xyz_cid0[i, :] = bdf.nodes[nid].get_position()
        nids[:ngrid] = grids
        if nspoint:
            nids[ngrid:ngrid + nspoint] = spoint
        if nepoint:
            nids[ngrid + nspoint:] = epoint
nid_fmt = '%ii' % nnodes
xyz_fmt = '%ii' % (nnodes * 3)
vtk_file.write('POINTS %i float\n' % nnodes)
vtk_file.write(pack_float_2d_array(xyz_fmt, xyz_cid0))
nelements = n0d + nline + nshell + nsolid
nmaterials = nelements
eid_fmt = '%ii' % nelements
eids = zeros(nelements, dtype='int32')
cell_types = zeros(nelements, dtype='int32')
pids = zeros(nelements, dtype='int32')
mids = zeros(nelements, dtype='int32')
# we'll add 1 to the slot count of each
# so for a single CROD, it has 2 nodes and 1 extra value (to indicate it's a line)
# for a total of 3
nline_slots = nline * 3
nshell_slots = 4 * nctria3 + 5 * (ncquad4 + ncshear) + 7 * nctria6 + 9 * ncquad8
nsolid_slots = 5 * nctetra4 + 6 * ncpyram5 + 7 * ncpenta6 + 9 * nchexa8
bdf.log.debug('nline=%s nshell=%s nsolid=%s' % (nline, nshell, nsolid))
assert nelements == bdf_nelements, 'nelements=%s bdf.nelements=%s card_count=\n%s' % (
nelements, bdf_nelements, bdf.card_count)
nelements_slots = nline_slots + nshell_slots + nsolid_slots
i = 0
vtk_file.write('CELLS %i %i\n' % (nelements, nelements_slots))
for eid, elem in sorted(iteritems(bdf.elements)):
etype = etype_map[elem.type]
nids2 = searchsorted(nids, elem.node_ids)
nnodesi = len(nids2)
vtk_file.write('%i %s\n' % (nnodesi, str(nids2)[1:-1]))
if elem.type in ['CTETRA', 'CPENTA', 'CHEXA', 'CPYRAM', 'CBEAM', 'CROD', 'CBAR']:
pid = elem.Pid()
mid = elem.Mid()
elif elem.type in ['CELAS1', 'CELAS2', 'CELAS3', 'CELAS4',
'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4', 'CBUSH', 'CFAST']:
pid = elem.Pid()
mid = 0
elif elem.type in ['CQUAD4', 'CQUAD8', 'CQUADX', 'CQUADX8', 'CQUAD',
'CTRIA3', 'CTRIA6', 'CTRIAX', 'CTRIAX6', 'CSHEAR']:
pid = elem.Pid()
prop = elem.pid
if prop.type in ['PCOMP', 'PCOMPG']:
mid = prop.Mid(0)
elif prop.type in ['PSHELL']:
mid = prop.Mid1()
elif prop.type in ['PSHEAR']:
mid = prop.Mid()
else:
raise NotImplementedError(prop)
elif elem.type in ['CONROD']:
pid = 0
mid = elem.Mid()
else:
raise NotImplementedError(elem)
eids[i] = eid
pids[i] = pid
mids[i] = mid
cell_types[i] = etype
i += 1
assert nelements == bdf_nelements, 'i=%s nelements=%s bdf.nelements=%s' % (i, nelements, bdf_nelements)
#vtk_file.write('\n')
vtk_file.write('CELL_TYPES %i\n' % nelements)
vtk_file.write(pack_int_array(eid_fmt, cell_types))
vtk_file.write('\n')
vtk_file.write('POINT_DATA %i\n' % nnodes)
vtk_file.write('NodeID %i float\n' % nnodes)
vtk_file.write(pack_int_array(nid_fmt, nids))
fmt = '%si' % nelements
if nelements:
vtk_file.write('ElementID %i float\n' % nelements)
vtk_file.write(pack_int_array(eid_fmt, eids))
if nproperties:
vtk_file.write('PropertyID %i float\n' % nproperties)
vtk_file.write(pack_int_array(eid_fmt, pids))
if nmaterials:
vtk_file.write('MaterialID %i float\n' % nmaterials)
vtk_file.write(pack_int_array(eid_fmt, mids))
nodal_cases = [op2.eigenvectors, op2.displacements, op2.velocities, op2.accelerations]
fmt = '%sf' % (nnodes * 6)
for cases in nodal_cases:
keys = list(cases.keys()) # list is needed for PY3
if not keys:
continue
key0 = keys[0]
#print(key0)
node_ids = cases[key0].node_gridtype[:, 0]
if nnodes == len(node_ids):
# every node exists
i = arange(nnodes)
ni = nnodes
else:
# node_ids is a subset of nids
i = searchsorted(nids, node_ids)
ni = len(i)
names = ['T1', 'T2', 'T3', 'R1', 'R2', 'R3']
for isubcase, case in sorted(iteritems(cases)):
if case.is_real:
#if i is None:
#data = case.data
#ni = nnodes
#else:
#data = zeros((nnodes, 6), dtype='float32')
#data[:, i, :] = case.data
data = case.data[:, i, :]
ntimes = case.data.shape[0]
case_type = case.__class__.__name__
for itime in range(ntimes):
if 0:
for icol, name in enumerate(names):
title = '%s_%s_isubcase=%s_itime=%s' % (case_type, name, isubcase, itime)
vtk_file.write('SCALARS %s float\n' % title)
vtk_file.write('LOOKUP_TABLE default\n')
#datai = data[itime, i, icol]
vtk_file.write(pack_float_1d_array(fmt, data[itime, i, icol]))
if 1:
title = '%s_isubcase=%s_itime=%s' % (case_type, isubcase, itime)
#FIELD RealDisplacementArray_FIELD_isubcase=1_itime=0 6
#t1 1 72 float
#0.00764469 0.00762899 ...
vtk_file.write('FIELD %s 6\n' % title)
for icol, name in enumerate(names):
vtk_file.write('%s 1 %s float\n' % (name, ni))
datai = case.data[itime, i, icol]
vtk_file.write(pack_float_1d_array(fmt, data[itime, i, icol]))
if 0:
title = '%s_FIELD_isubcase=%s_itime=%s' % (case_type, isubcase, itime)
vtk_file.write('FIELD %s 6 %i float\n' % (title, ni))
vtk_file.write('LOOKUP_TABLE default\n')
vtk_file.write(pack_float_2d_array(fmt, data[itime, i, :]))
#CELLS 217 1039
def main():
export_to_vtk('solid_bending')
if __name__ == '__main__':
main()
|
saullocastro/pyNastran
|
pyNastran/op2/export_to_vtk.py
|
Python
|
lgpl-3.0
| 11,895
|
[
"VTK"
] |
2b1c1615c4be8059cc5bae2f7cd405f9756e9c5874828542b8df7951231395b5
|
"""
BayesFlare
==========
Provides:
1. A number of pythonic means for handling lightcurve data from the Kepler spacecraft
2. A set of functions for conducting analysis of the lightcurve data to find flaring events
Using the documentation
-----------------------
Documentation for BayesFlare is available in the form of docstrings and this compiled reference
guide. Further information on the methods used is covered in the paper (Pitkin et al, 2014).
The examples in the docstrings assume that `BayesFlare` has been imported as `pf`
>>> import bayesflare as pf
Code snippets are indicated by three greater-than signs
>>> x = 2 + 3
in common with standard usage in Python documentation.
A docstring can be read using the Python interpreter's built-in function ``help``
>>> help(pf.plot)
"""
from __future__ import absolute_import
from .data.data import Loader, Lightcurve
from .models import Model, Flare, Transit, Expdecay, Impulse, Gaussian, Step, ModelCurve
from .finder.find import SigmaThresholdMethod, OddsRatioDetector
from .noise.noise import estimate_noise_ps, estimate_noise_tv, make_noise_lightcurve, addNoise, highpass_filter_lightcurve, savitzky_golay, running_median
from .stats import *
from .stats.bayes import Bayes, ParameterEstimationGrid
#from .stats.thresholding import Thresholder
from .misc.misc import nextpow2, mkdir
from .inject.inject import inject_model
from .simulate.simulate import SimLightcurve, simulate_single
|
BayesFlare/bayesflare
|
bayesflare/__init__.py
|
Python
|
gpl-2.0
| 1,479
|
[
"Gaussian"
] |
4ae08cef7129e6bbd39fa21e239a6967a5a922909c8a9299bbab1cf5c658121e
|
#!/usr/bin/env python
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2012 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
from Biskit.tools import *
import re
import tempfile
import Biskit.settings as settings
from Biskit import AmberRstParser
def _use():
print """
Concatenate 2 amber crd/vel files.
amberConcat.py -i sim1.crd sim2.crd -o sim_merged.crd -n n_atoms [-b
-p |int_precission| -rst |last_sim1.rst last_sim2.rst| ]
-n number of atoms (obligatory)
-b traj has box info (3 additional coordinates)
    -p when looking for the overlapping block, round coordinates to p decimal places
-rst try repairing last frame of sim1, sim2 from given restart file(s)
"""
sys.exit( 0 )
class ParseError( Exception ):
pass
class Concat:
"""
Concatenate 2 Amber-generated crd files.
"""
def __init__( self, f1, f2, out, n_atoms, boxInfo=0, precission=None,
lastRst=None ):
self.f1 = absfile( f1 )
self.f2 = absfile( f2 )
self.o = absfile( out )
self.n = n_atoms
self.box = boxInfo
self.precission = precission
self.lastRst = lastRst
self.lines_per_frame = self.n * 3 / 10
if self.n % 10 != 0:
self.lines_per_frame += 1
if self.box:
self.lines_per_frame += 1
def line2numbers( self, l ):
"""convert line from crd/vel file to list of float numbers"""
        xnumber = "-*\d+\.\d+" # optionally negative number
xspace = ' *' # one or more space char
ex = re.compile('('+xspace+xnumber+')')
match = ex.findall( l )
r = [ round(float(strCrd),3) for strCrd in match ]
return r
def fuzzyEquals( self, l1, l2, prec ):
"""compare two lines of numbers allowing slight discrepancies."""
if prec == None:
return l1 == l2
n1 = self.line2numbers( l1 )
n2 = self.line2numbers( l2 )
for i in range(10):
if not (round(float(n1[i]), prec) == round(float(n2[i]), prec)):
return 0
return 1
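    # Illustrative note (added, not in the original script): fuzzyEquals() compares the first
    # ten numbers of two coordinate lines after rounding to `prec` decimals, so with prec=2
    # the values 1.231 and 1.233 compare equal while 1.231 and 1.249 do not; go() relies on
    # this to find the frame where the second trajectory overlaps the end of the first.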
def go( self ):
f1 = open( self.f1 )
f2 = open( self.f2 )
fo = open( self.o, 'w' )
f2.readline() ## skip first empty line of second crd
f2_firstLine = f2.readline()
## get first line of repair restart block, if available
rst_firstLine = None
p = None
if self.lastRst:
print "Reading rst file for repair: ", stripFilename(self.lastRst)
p = AmberRstParser( self.lastRst )
rst_firstLine = p.getFirstCrdLine()
## write first empty line
fo.write( f1.readline() )
i = self.lines_per_frame
frame = -1
print "\nReading %s..." % stripFilename( self.f1 ),
for l in f1:
if i == self.lines_per_frame:
i = 0
frame += 1
if frame % 100 == 0 and frame != 0:
flushPrint( '#' )
if self.fuzzyEquals( l, f2_firstLine, self.precission ):
break
if rst_firstLine and self.fuzzyEquals( l, rst_firstLine,
self.precission):
print "Replacing frame %i by frame from restart file" %frame
p.writeCrd( fo, lastAtom=self.n )
if self.box:
fo.write(' 0.000 0.000 0.000\n' )
break
fo.write( l )
i += 1
print "%i frames. %i lines per frame. Last had %i lines." % \
(frame, self.lines_per_frame, i+1)
frame = 0
i = self.lines_per_frame
print "Reading %s..." % stripFilename( self.f2 ),
fo.write( f2_firstLine )
for l in f2:
if i == self.lines_per_frame:
i = 0
frame += 1
if frame % 100 == 0:
flushPrint( '#' )
fo.write( l )
i += 1
print "%i frames. %i lines per frame. Last had %i lines." % \
(frame, self.lines_per_frame, i+1)
f1.close()
f2.close()
fo.close()
def concatMany( fout, n_atoms, box, precission, rst, f1, *fn ):
"""
Hack: Use Concater to concat more than 2 files.
"""
current_out = tempfile.mktemp('_concat.crd', dir=settings.tempDirShared)
rst.reverse()
fin = f1
for f in fn:
if f == fn[-1]:
current_out = fout
else:
current_out = tempfile.mktemp('_concat.crd', dir=settings.tempDirShared)
r = None
if len( rst ) > 0:
r = rst.pop()
c = Concat( fin, f, current_out, n_atoms, box, precission, lastRst=r )
c.go()
fin = current_out
if __name__ == '__main__':
if len( sys.argv ) < 3:
_use()
o = cmdDict( {} )
f1 = o['i'][0]
fn = o['i'][1:]
n_atoms = int( o['n'] )
precission = None
if 'p' in o:
precission = int( o['p'] )
rst = toList( o.get('rst', [] ) )
concatMany( o['o'], n_atoms, o.has_key('b'), precission, rst, f1, *fn )
## c = Concat( f1, f2, o['o'], n_atoms, o.has_key( 'b' ), precission )
## c.go()
|
ostrokach/biskit
|
scripts/Biskit/amberConcat.py
|
Python
|
gpl-3.0
| 6,116
|
[
"Amber"
] |
1e4eb71df3130b8cabe0d75b076ead760039a0658e9874a93b1e137377632045
|
# imports needed
import matplotlib as mpl
mpl.use('Agg')
from smarty.forcefield import *
import openeye
from openeye import oechem
import smarty
from smarty.utils import get_data_filename
from simtk import openmm
from simtk import unit
import numpy as np
import netCDF4 as netcdf
import collections as cl
import pandas as pd
import pymbar
from pymbar import timeseries
import glob
import sys
from smarty.forcefield import generateTopologyFromOEMol
import pdb
import cmath  # needed below for cmath.sqrt(-1) in the circular torsion average
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#np.set_printoptions(threshold=np.inf)
#----------------------------------------------------------------------
# CONSTANTS
#----------------------------------------------------------------------
kB = 0.001987204118 #Boltzmann constant (Gas constant) in kcal/(mol*K)
#----------------------------------------------------------------------
# UTILITY FUNCTIONS
#----------------------------------------------------------------------
def constructDataFrame(mol_files):
"""
    Construct a pandas dataframe to be populated with computed single molecule properties. Each unique bond, angle and torsion has its own column for a value
and uncertainty.
Parameters
-----------
mol_files - a list of mol2 files from which we determine connectivity using OpenEye Tools and construct the dataframe using Pandas.
Returns
-----------
df - data frame in form molecules x property id that indicates if a specific property exists for a molecule (1 in cell if yes, 0 if no)
"""
molnames = []
for i in mol_files:
molname = i.replace(' ', '')[:-5]
molname = molname.rsplit('/' ,1)[1]
molnames.append(molname)
OEMols=[]
for i in mol_files:
mol = oechem.OEGraphMol()
ifs = oechem.oemolistream(i)
flavor = oechem.OEIFlavor_Generic_Default | oechem.OEIFlavor_MOL2_Default | oechem.OEIFlavor_MOL2_Forcefield
ifs.SetFlavor(oechem.OEFormat_MOL2, flavor)
oechem.OEReadMolecule(ifs, mol)
oechem.OETriposAtomNames(mol)
OEMols.append(mol)
ff = ForceField(get_data_filename('/data/forcefield/smirff99Frosst.ffxml'))
labels = []
lst0 = []
lst1 = []
lst2 = []
lst00 = [[] for i in molnames]
lst11 = [[] for i in molnames]
lst22 = [[] for i in molnames]
lst_0 = [[] for i in molnames]
lst_1 = [[] for i in molnames]
lst_2 = [[] for i in molnames]
for ind, val in enumerate(OEMols):
label = ff.labelMolecules([val], verbose = False)
for entry in range(len(label)):
for bond in label[entry]['HarmonicBondGenerator']:
lst0.extend([str(bond[0])])
lst00[ind].extend([str(bond[0])])
lst_0[ind].append([str(bond[0]),str(bond[2])])
for angle in label[entry]['HarmonicAngleGenerator']:
lst1.extend([str(angle[0])])
lst11[ind].extend([str(angle[0])])
lst_1[ind].append((str(angle[0]),str(angle[2])))
for torsion in label[entry]['PeriodicTorsionGenerator']:
lst2.extend([str(torsion[0])])
lst22[ind].extend([str(torsion[0])])
lst_2[ind].append([str(torsion[0]),str(torsion[2])])
# Return unique strings from lst0
cols0 = set()
for x in lst0:
cols0.add(x)
cols0 = list(cols0)
# Generate data lists to populate dataframe
data0 = [[] for i in range(len(lst00))]
for val in cols0:
for ind,item in enumerate(lst00):
if val in item:
data0[ind].append(1)
else:
data0[ind].append(0)
# Return unique strings from lst1
cols1 = set()
for x in lst1:
cols1.add(x)
cols1 = list(cols1)
# Generate data lists to populate frame (1 means val in lst11 was in cols1, 0 means it wasn't)
data1 = [[] for i in range(len(lst11))]
for val in cols1:
for ind,item in enumerate(lst11):
if val in item:
data1[ind].append(1)
else:
data1[ind].append(0)
# Return unique strings from lst2
cols2 = set()
for x in lst2:
cols2.add(x)
cols2 = list(cols2)
# Generate data lists to populate frame (1 means val in lst22 was in cols2, 0 means it wasn't)
data2 = [[] for i in range(len(lst22))]
for val in cols2:
for ind,item in enumerate(lst22):
if val in item:
data2[ind].append(1)
else:
data2[ind].append(0)
# Clean up clarity of column headers and molecule names
cols0t = ["BondEquilibriumLength " + i for i in cols0]
cols0temp = ["BondEquilibriumLength_std " + i for i in cols0]
cols0 = cols0t + cols0temp
cols1t = ["AngleEquilibriumAngle " + i for i in cols1]
cols1temp = ["AngleEquilibriumAngle_std " + i for i in cols1]
cols1 = cols1t + cols1temp
cols2t = ["TorsionFourier1 " + i for i in cols2]
cols2temp = ["TorsionFourier1_std " + i for i in cols2]
cols2 = cols2t + cols2temp
data0 = [i+i for i in data0]
data1 = [i+i for i in data1]
data2 = [i+i for i in data2]
# Construct dataframes
df0 = pd.DataFrame(data = data0, index = molnames, columns = cols0)
df0['molecule'] = df0.index
df1 = pd.DataFrame(data = data1, index = molnames, columns = cols1)
df1['molecule'] = df1.index
df2 = pd.DataFrame(data = data2, index = molnames, columns = cols2)
df2['molecule'] = df2.index
dftemp = pd.merge(df0, df1, how = 'outer', on = 'molecule')
df = pd.merge(dftemp, df2, how = 'outer', on = 'molecule')
return df, lst_0, lst_1, lst_2
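# Illustrative sketch (added; the mol2 path is hypothetical): the returned frame is indexed by
# molecule name, with one 0/1 presence column per labelled bond/angle/torsion keyed by its atom
# indices, e.g.
#   df, bonds, angles, torsions = constructDataFrame(['Mol2_files/AlkEthOH_r0.mol2'])
#   df.loc['AlkEthOH_r0', 'BondEquilibriumLength [0, 1]']   # 1 if that bond was labelled, else 0
# bonds/angles/torsions hold per-molecule [atom-index string, SMIRKS] pairs.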
#------------------------------------------------------------------
def ComputeBondsAnglesTorsions(xyz, bonds, angles, torsions):
"""
    Compute three 2D arrays (bond lengths, angles and torsions), each with frames in rows and the individual bond/angle/torsion values in columns.
Parameters
-----------
    xyz - coordinate array of shape (n_frames, n_atoms, 3)
    bonds, angles, torsions - numbered atom index tuples associated with all unique bonds, angles and torsions
Returns
----------
bond_dist, angle_dist, torsion_dist - computed bonds, angles and torsions across the provided time series
"""
niterations = xyz.shape[0] # no. of frames
natoms = xyz.shape[1]
nbonds = np.shape(bonds)[0]
nangles = np.shape(angles)[0]
ntorsions = np.shape(torsions)[0]
bond_dist = np.zeros([niterations,nbonds])
angle_dist = np.zeros([niterations,nangles])
torsion_dist = np.zeros([niterations,ntorsions])
for n in range(niterations):
xyzn = xyz[n] # coordinates this iteration
bond_vectors = np.zeros([nbonds,3])
for i, bond in enumerate(bonds):
            bond_vectors[i,:] = xyzn[bond[0]] - xyzn[bond[1]] # compute the bond vector
bond_dist[n,i] = np.linalg.norm(bond_vectors[i]) # calculate the bond distance
# we COULD reuse the bond vectors and avoid subtractions, but would involve a lot of bookkeeping
# for now, just recalculate
bond_vector1 = np.zeros(3)
bond_vector2 = np.zeros(3)
bond_vector3 = np.zeros(3)
for i, angle in enumerate(angles):
            bond_vector1 = xyzn[angle[0]] - xyzn[angle[1]] # vector along the first bond of the angle
            bond_vector2 = xyzn[angle[1]] - xyzn[angle[2]] # vector along the second bond of the angle
dot = np.dot(bond_vector1,bond_vector2)
len1 = np.linalg.norm(bond_vector1)
len2 = np.linalg.norm(bond_vector2)
angle_dist[n,i] = np.arccos(dot/(len1*len2)) # angle in radians
for i, torsion in enumerate(torsions):
# algebra from http://math.stackexchange.com/questions/47059/how-do-i-calculate-a-dihedral-angle-given-cartesian-coordinates, Daniel's answer
            bond_vector1 = xyzn[torsion[0]] - xyzn[torsion[1]] # vector along the first bond of the torsion
            bond_vector2 = xyzn[torsion[1]] - xyzn[torsion[2]] # vector along the central bond of the torsion
            bond_vector3 = xyzn[torsion[2]] - xyzn[torsion[3]] # vector along the last bond of the torsion
bond_vector1 /= np.linalg.norm(bond_vector1)
bond_vector2 /= np.linalg.norm(bond_vector2)
bond_vector3 /= np.linalg.norm(bond_vector3)
n1 = np.cross(bond_vector1,bond_vector2)
n2 = np.cross(bond_vector2,bond_vector3)
m = np.cross(n1,bond_vector2)
x = np.dot(n1,n2)
y = np.dot(m,n2)
torsion_dist[n,i] = np.arctan2(y,x) # angle in radians
return bond_dist, angle_dist, torsion_dist
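# Added illustrative sketch (not part of the original analysis): a minimal check of the dihedral
# convention used above on a single frame of four atoms.
def _example_dihedral_sketch():
    """Four atoms whose central dihedral is 90 degrees; returns ~pi/2 (radians)."""
    xyz = np.array([[[1., 0., 0.],    # atom 0
                     [0., 0., 0.],    # atom 1
                     [0., 0., 1.],    # atom 2
                     [0., 1., 1.]]])  # atom 3
    _, _, torsion_dist = ComputeBondsAnglesTorsions(xyz, [], [], [[0, 1, 2, 3]])
    return torsion_dist[0, 0]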
#------------------------------------------------------------------
def calculateBondsAnglesTorsionsStatistics(properties, bond_dist, angle_dist, torsion_dist, bonds, angles, torsions, torsionbool):
"""
Parameters
-----------
properties: A list of property strings we want value for
bond_dist: a Niterations x nbonds list of bond lengths
angle_dist: a Niterations x nbonds list of angle angles (in radians)
torsion_dist: a Niterations x nbonds list of dihedral angles (in radians)
bonds: a list of bonds (ntorsions x 2)
angles: a list of angles (ntorsions x 3)
torsions: a list of torsion atoms (ntorsions x 4)
torsionbool: boolean which suppresses torsion statistical analysis if False
    # we assume the bond_dist / bonds, angle_dist / angles, torsion_dist / torsions were constructed in the same order.
    Returns
    ----------
    PropertyDict - dictionary of average value of bond, angle or torsion across time series with associated uncertainty in mean and uncertainty in uncertainty
"""
PropertyDict = dict()
nbonds = np.shape(bonds)[0]
nangles = np.shape(angles)[0]
ntorsions = np.shape(torsions)[0]
nsamp = np.shape(bond_dist)[0]-1 #WARNING: assumes data points uncorrelated!
for p in properties:
AtomList = p.split(' ', 1)[1:] # figure out which bond this is:
AtomList = [i.lstrip('[').rstrip(']') for i in AtomList] # we assume bond_dist /bond is in the same order.
for i in AtomList:
AtomList = i.strip().split(',')
AtomList = map(int, AtomList)
if 'BondEquilibriumLength' in p:
for i in range(nbonds):
if np.array_equal(AtomList, bonds[i]):
value = np.mean(bond_dist[:,i])
uncertainty = np.std(bond_dist[:,i])/np.sqrt(nsamp)
PropertyDict[p] = [value,uncertainty]
if 'BondEquilibriumLength_std' in p:
for i in range(nbonds):
if np.array_equal(AtomList, bonds[i]):
value = np.std(bond_dist[:,i])
uncertainty = np.std(bond_dist[:,i])**2/np.sqrt(nsamp/2)
PropertyDict[p] = [value,uncertainty]
if 'AngleEquilibriumAngle' in p:
for i in range(nangles):
if np.array_equal(AtomList, angles[i]):
value = np.mean(angle_dist[:,i])
uncertainty = np.std(angle_dist[:,i])/np.sqrt(nsamp)
PropertyDict[p] = [value,uncertainty]
if torsionbool==True:
if 'TorsionFourier1' in p:
for i in range(ntorsions):
if np.array_equal(AtomList, torsions[i]):
value = np.mean(torsion_dist[:,i])
uncertainty = np.std(torsion_dist[:,i])/np.sqrt(nsamp)
PropertyDict[p] = [value,uncertainty]
if 'TorsionFourier1_std' in p:
for i in range(ntorsions):
if np.array_equal(AtomList, torsions[i]):
value = np.std(torsion_dist[:,i])
uncertainty = np.std(torsion_dist[:,i])**2/np.sqrt(nsamp/2)
PropertyDict[p] = [value,uncertainty]
# Circular distribution alternate for torsion calculation
if 'TorsionFourier1' in p:
for i in range(ntorsions):
if np.array_equal(AtomList, torsions[i]):
value = np.array([])
for j in range(nsamp):
val = np.real((np.exp(cmath.sqrt(-1)*torsion_dist[:,i]))**j)
value = np.append(value, val)
value = (1/nsamp)*np.sum(value)
uncertainty = np.std(torsion_dist[:,i])/np.sqrt(nsamp)
PropertyDict[p] = [value, uncertainty]
if 'TorsionFourier1_std' in p:
for i in range(ntorsions):
if np.array_equal(AtomList, torsions[i]):
value = np.std(torsion_dist[:,i])
uncertainty = np.std(torsion_dist[:,i])**2/np.sqrt(nsamp/2)
PropertyDict[p] = [value,uncertainty]
else:
pass
return PropertyDict
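# Illustrative note (added): keys of PropertyDict reuse the dataframe column convention, e.g.
# 'BondEquilibriumLength [0, 1]' maps to [mean bond length, standard error of the mean] and
# 'BondEquilibriumLength_std [0, 1]' maps to [standard deviation, its approximate uncertainty].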
#------------------------------------------------------------------
def get_properties_from_trajectory(mol2, ncfiles, torsionbool=True):
"""
take multiple .nc files with identifier names and a pandas dataframe with property
names for single atom bonded properties (including the atom numbers) and populate
those property pandas dataframe.
Parameters
-----------
mol2 - mol2 files used to identify and index molecules
ncfiles - a list of trajectories in netcdf format. Names should correspond to the identifiers in the pandas dataframe.
    torsionbool - boolean value passed to calculateBondsAnglesTorsionsStatistics() to suppress torsion statistics analysis. Default set to True (torsion calculation not suppressed).
Returns
----------
bond_dist - calculated bond distribution across trajectory
angle_dist - calculated angle distribution across trajectory
torsion_dist - calculated torsion distribution across trajectory
Properties - dictionary of an average value of bond, angle or torsion across time series with associated uncertainty in mean and uncertainty in uncertainty
"""
PropertiesPerMolecule = dict()
# here's code that generate list of properties to calculate for each molecule and
# populate PropertiesPerMolecule
mol_files = mol2
    df = constructDataFrame(mol_files)[0]  # constructDataFrame() returns (df, bonds, angles, torsions); only the frame is needed here
MoleculeNames = df.molecule.tolist()
properties = df.columns.values.tolist()
for ind, val in enumerate(MoleculeNames):
defined_properties = list()
for p in properties:
            if (p != 'molecule') and ('_std' not in p):
if df.iloc[ind][p] != 0:
defined_properties.append(p)
PropertiesPerMolecule[val] = defined_properties
AtomDict = dict()
AtomDict['MolName'] = list()
for fname in ncfiles:
MoleculeName = fname.split('.')[0]
AtomDict['MolName'].append(MoleculeName)
# extract the xyz coordinate for each frame
data = netcdf.Dataset(fname)
xyz = data.variables['coordinates']
# what is the property list for this molecule
PropertyNames = PropertiesPerMolecule[MoleculeName]
# extract the bond/angle/torsion lists
AtomDict['Bond'] = list()
AtomDict['Angle'] = list()
AtomDict['Torsion'] = list()
# which properties will we use to construct the bond list
ReferenceProperties = ['BondEquilibriumLength','AngleEquilibriumAngle','TorsionFourier1']
for p in PropertyNames:
PropertyName = p.split(' ', 1)[0]
AtomList = p.split(' ', 1)[1:]
AtomList = [i.lstrip('[').rstrip(']') for i in AtomList]
for i in AtomList:
AtomList = i.strip().split(',')
AtomList = map(int, AtomList)
if any(rp in p for rp in ReferenceProperties):
if 'Bond' in p:
AtomDict['Bond'].append(AtomList)
if 'Angle' in p:
AtomDict['Angle'].append(AtomList)
if 'Torsion' in p:
AtomDict['Torsion'].append(AtomList)
        bond_dist, angle_dist, torsion_dist = ComputeBondsAnglesTorsions(xyz,
AtomDict['Bond'],
AtomDict['Angle'],
AtomDict['Torsion'])
Properties = calculateBondsAnglesTorsionsStatistics(PropertyNames,
bond_dist, angle_dist, torsion_dist,
AtomDict['Bond'], AtomDict['Angle'], AtomDict['Torsion'], torsionbool)
#Put properties back in dataframe and return
return [bond_dist, angle_dist, torsion_dist, Properties]
#------------------------------------------------------------------
def read_col(filename,colname,frames):
"""
Reads in columns from .csv outputs of OpenMM StateDataReporter
Parameters
-----------
filename (string) - the path to the folder of the csv
colname (string) - the column you wish to extract from the csv
frames (integer) - the number of frames you wish to extract
Returns
----------
dat - the pandas column series written as a matrix
"""
#print "--Reading %s from %s/..." % (colname,filename)
# Read in file output as pandas df
df = pd.read_csv(filename, sep= ',')
# Read values direct from column into numpy array
dat = df.as_matrix(columns = colname)
dat = dat[-frames:]
return dat
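# Illustrative sketch (added; the csv path and column label are hypothetical): pulling the last
# 100 potential energies written by an OpenMM StateDataReporter
#   ene = read_col('StateData/energies.csv', ['Potential Energy (kJ/mole)'], 100)
# Note that colname must be a list of column labels because it is handed to DataFrame.as_matrix.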
#------------------------------------------------------------------
def readtraj(ncfiles,indkeep):
"""
Take multiple .nc files and read in coordinates in order to re-valuate energies based on parameter changes
Parameters
-----------
ncfiles - a list of trajectories in netcdf format
Returns
----------
data - all of the data contained in the netcdf file
xyzn - the coordinates from the netcdf in angstroms
"""
data = netcdf.Dataset(ncfiles)
xyz = data.variables['coordinates']
xyzn = unit.Quantity(xyz[-indkeep:], unit.angstroms)
return data, xyzn
#------------------------------------------------------------------
def get_energy(system, positions):
"""
Return the potential energy.
Parameters
----------
system : simtk.openmm.System
The system to check
positions : simtk.unit.Quantity of dimension (natoms,3) with units of length
The positions to use
Returns
---------
energy
"""
integrator = openmm.VerletIntegrator(1.0 * unit.femtoseconds)
context = openmm.Context(system, integrator)
context.setPositions(positions)
state = context.getState(getEnergy=True)
energy = state.getPotentialEnergy() / unit.kilocalories_per_mole
return energy
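# Illustrative sketch (added): a typical call pairs a ForceField-built System with one frame of
# coordinates carrying explicit units, e.g.
#   e = get_energy(system, unit.Quantity(frame_xyz, unit.angstroms))
# where `system` and `frame_xyz` are assumed to come from createSystem() and readtraj() above;
# the return value is a plain float in kcal/mol.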
#------------------------------------------------------------------
def new_param_energy(mol2, traj, smirkss, N_k, params, paramtype, samps, indkeep, *coords):
"""
Return potential energies associated with specified parameter perturbations.
Parameters
----------
mol2: mol2 file associated with molecule of interest used to construct OEMol object
traj: trajectory from the simulation ran on the given molecule
smirkss: list of smirks strings we wish to apply parameter changes to (Only changing 1 type of string at a time now. All bonds, all angles or all torsions)
N_k: numpy array of number of samples per state
params: a numpy array of the parameter values we wish to test
paramtype: the type of ff param being edited
BONDS - k (bond force constant), length (equilibrium bond length)
ANGLES - k (angle force constant), angle (equilibrium bond angle)
TORSIONS - k{i} (torsion force constant), idivf{i} (torsional barrier multiplier), periodicity{i} (periodicity of the torsional barrier), phase{i}
(phase offset of the torsion)
NONBONDED - epsilon and rmin_half (where epsilon is the LJ parameter epsilon and rmin_half is half of the LJ parameter rmin)
samps: samples per energy calculation
Returns
-------
energies: a list of the energies associated with the forcfield parameters used as input
"""
#-------------------
# PARAMETERS
#-------------------
params = params
N_k = N_k
ncfiles = traj
# Determine number of simulations
K = np.size(N_k)
#if np.shape(params) != np.shape(N_k): raise "K_k and N_k must have same dimensions"
# Determine max number of samples to be drawn from any state
#-------------
# SYSTEM SETUP
#-------------
verbose = False # suppress echos from OEtoolkit functions
ifs = oechem.oemolistream(mol2)
mol = oechem.OEMol()
# This uses parm@frosst atom types, so make sure to use the forcefield-flavor reader
flavor = oechem.OEIFlavor_Generic_Default | oechem.OEIFlavor_MOL2_Default | oechem.OEIFlavor_MOL2_Forcefield
ifs.SetFlavor( oechem.OEFormat_MOL2, flavor)
oechem.OEReadMolecule(ifs, mol )
# Perceive tripos types
oechem.OETriposAtomNames(mol)
# Get positions for use below
if not coords:
data, xyz = readtraj(traj,indkeep)
#indkeep = int(lentraj*perckeep)
xyzn = xyz[-indkeep:]
else:
xyzn = coords
# Load forcefield file
ffxml = get_data_filename('forcefield/smirff99Frosst.ffxml')
ff = ForceField(ffxml)
# Generate a topology
from smarty.forcefield import generateTopologyFromOEMol
topology = generateTopologyFromOEMol(mol)
#-----------------
# MAIN
#-----------------
# Calculate energies
energies = np.zeros([len(smirkss),len(params),samps],np.float64)
for inds,s in enumerate(smirkss):
temp0 = np.zeros([len(params),samps],np.float64)
param = ff.getParameter(smirks=s)
for ind,val in enumerate(params):
for p in paramtype:
temp1 = np.zeros(samps,np.float64)
for a,b in zip(val,p):
param[b] = str(a)
ff.setParameter(param, smirks = s)
system = ff.createSystem(topology, [mol], verbose=verbose)
for i,a in enumerate(xyzn):
e = np.float(get_energy(system, a))
energies[inds,ind,i] = e
return energies, xyzn, system
#------------------------------------------------------------------
def get_small_mol_dict(mol2, traj):
"""
Return dictionary specifying the bond, angle and torsion indices to feed to ComputeBondsAnglesTorsions()
Parameters
----------
mol2: mol2 file associated with molecule of interest used to determine atom labels
traj: trajectory from the simulation ran on the given molecule
Returns
-------
AtomDict: a dictionary of the bond, angle and torsion indices for the given molecule
"""
PropertiesPerMolecule = dict()
mol_files = []
for i in mol2:
temp = i
mol_files.append(temp)
df,lst_0,lst_1,lst_2 = constructDataFrame(mol_files)
MoleculeNames = df.molecule.tolist()
properties = df.columns.values.tolist()
#print MoleculeNames
for ind, val in enumerate(MoleculeNames):
defined_properties = list()
for p in properties:
            if (p != 'molecule') and ('_std' not in p):
if df.iloc[ind][p] != 0:
defined_properties.append(p)
PropertiesPerMolecule[val] = defined_properties
AtomDict = dict()
AtomDict['MolName'] = list()
for fname in traj:
MoleculeName = fname.split('.')[0][8:]
AtomDict['MolName'].append(MoleculeName)
# what is the property list for this molecule
PropertyNames = PropertiesPerMolecule[MoleculeName]
# extract the bond/angle/torsion lists
AtomDict['Bond'] = list()
AtomDict['Angle'] = list()
AtomDict['Torsion'] = list()
# which properties will we use to construct the bond list
ReferenceProperties = ['BondEquilibriumLength','AngleEquilibriumAngle','TorsionFourier1']
for p in PropertyNames:
PropertyName = p.split(' ', 1)[0]
AtomList = p.split(' ', 1)[1:]
AtomList = [i.lstrip('[').rstrip(']') for i in AtomList]
for i in AtomList:
AtomList = i.strip().split(',')
AtomList = map(int, AtomList)
if any(rp in p for rp in ReferenceProperties):
if 'Bond' in p:
AtomDict['Bond'].append(AtomList)
if 'Angle' in p:
AtomDict['Angle'].append(AtomList)
if 'Torsion' in p:
AtomDict['Torsion'].append(AtomList)
return AtomDict,lst_0,lst_1,lst_2
#------------------------------------------------------------------
def subsampletimeseries(timeser,xyzn,N_k):
"""
Return a subsampled timeseries based on statistical inefficiency calculations.
Parameters
----------
timeser: the timeseries to be subsampled
xyzn: the coordinates associated with each frame of the timeseries to be subsampled
N_k: original # of samples in each timeseries
Returns
---------
N_k_sub: new number of samples per timeseries
ts_sub: the subsampled timeseries
xyz_sub: the subsampled configuration series
"""
# Make a copy of the timeseries and make sure is numpy array of floats
ts = timeser
xyz = xyzn
# initialize array of statistical inefficiencies
g = np.zeros(len(ts),np.float64)
for i,t in enumerate(ts):
if np.count_nonzero(t)==0:
g[i] = np.float(1.)
print "WARNING FLAG"
else:
g[i] = timeseries.statisticalInefficiency(t)
N_k_sub = np.array([len(timeseries.subsampleCorrelatedData(t,g=b)) for t, b in zip(ts,g)])
ind = [timeseries.subsampleCorrelatedData(t,g=b) for t,b in zip(ts,g)]
#xyz_sub = np.array([unit.Quantity(c[i], unit.angstroms) for c,i in zip(xyz,ind)])
if (N_k_sub == N_k).all():
ts_sub = ts
xyz_sub = xyz
print "No sub-sampling occurred"
else:
print "Sub-sampling..."
ts_sub = np.array([t[timeseries.subsampleCorrelatedData(t,g=b)] for t,b in zip(ts,g)])
for c in xyz:
xyz_sub = [c[timeseries.subsampleCorrelatedData(t,g=b)] for t,b in zip(ts,g)]
return ts_sub, N_k_sub, xyz_sub, ind
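# Illustrative sketch (added): for one correlated observable the routine keeps roughly every
# g-th frame (g = statistical inefficiency from pymbar.timeseries) and trims the coordinates
# to the same frame indices, e.g.
#   ts_sub, N_k_sub, xyz_sub, ind = subsampletimeseries([A[0]], [xyznsampled[0]], N_k)
# N_k_sub then holds the reduced number of effectively independent samples per series.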
#------------------------------------------------------------------
def reject_outliers(data, m=2):
return data[abs(data - np.mean(data)) < m * np.std(data)]
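# Illustrative example (added): entries further than m standard deviations from the mean are
# dropped, e.g. reject_outliers(np.array([1.] * 9 + [50.])) keeps only the nine 1.0 values.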
#------------------------------------------------------------------
# MAIN
#-----------------------------------------------------------------
# PARAMETERS
#-----------------------------------------------------------------
#N_k = np.array([100, 100, 100, 100, 100])
#N_k = np.array([100,100])
#N_k_orig = 10000.
#pctkeep = 0.8
indkeep = 4000
N_k= np.array([4000])
K = np.size(N_k)
N_max = np.max(N_k)
K_extra_vals = np.arange(0.08,0.25,0.01)
#K_k = np.array([[106], [104], [102], [100], [98]])
#K_k = np.array([[104.],[100.]])
#K_k = np.array([[680.]])
#K_k = np.array([[680.]])
K_k = np.array([[[1.090]] for val in K_extra_vals])
#K_extra = np.array([[96], [99], [103], [105], [108]]) # unsampled force constants
#K_extra = np.array([[110.],[98.]])
#K_extra = np.array([[600.]])
K_extra = np.array([[[val]] for val in K_extra_vals])
paramtype = [['k1']]
obstype = 'Torsion'
#mol2 = [['molecules/AlkEthOH_r0.mol2'],['molecules/AlkEthOH_r48.mol2'],['molecules/AlkEthOH_r51.mol2'],['molecules/AlkEthOH_c581.mol2'],['molecules/AlkEthOH_c100.mol2'],['molecules/AlkEthOH_c1161.mol2'],['molecules/AlkEthOH_c1266.mol2'],['molecules/AlkEthOH_c38.mol2'],['molecules/AlkEthOH_r118.mol2'],['molecules/AlkEthOH_r12.mol2']]
#mol2 = [['molecules/AlkEthOH_r0.mol2'],['molecules/AlkEthOH_c581.mol2'],['molecules/AlkEthOH_c100.mol2'],['molecules/AlkEthOH_c1266.mol2'],['molecules/AlkEthOH_r51.mol2'],['molecules/AlkEthOH_r48.mol2']]
mol2 = [['Mol2_files/'+sys.argv[1]+'.mol2']]
#mol2en = ['molecules/AlkEthOH_r0.mol2','molecules/AlkEthOH_r48.mol2','molecules/AlkEthOH_r51.mol2','molecules/AlkEthOH_c581.mol2','molecules/AlkEthOH_c100.mol2','molecules/AlkEthOH_c1161.mol2','molecules/AlkEthOH_c1266.mol2','molecules/AlkEthOH_c38.mol2','molecules/AlkEthOH_r118.mol2','molecules/AlkEthOH_r12.mol2']
mol2en = [val[0] for val in mol2]
#traj = ['traj/AlkEthOH_r0.nc','traj/AlkEthOH_r48.nc','traj/AlkEthOH_r51.nc','traj/AlkEthOH_c581.nc','traj/AlkEthOH_c100.nc','traj/AlkEthOH_c1161.nc','traj/AlkEthOH_c1266.nc','traj/AlkEthOH_c38.nc','traj/AlkEthOH_r118.nc','traj/AlkEthOH_r12.nc']
#traj = ['traj/AlkEthOH_r0.nc','traj/AlkEthOH_c581.nc','traj/AlkEthOH_c100.nc','traj/AlkEthOH_c1266.nc','traj/AlkEthOH_r51.nc','traj/AlkEthOH_r48.nc']
traj = ['traj4ns/'+sys.argv[1]+'.nc']
#trajs = [['traj/AlkEthOH_r0.nc'],['traj/AlkEthOH_r48.nc'],['traj/AlkEthOH_r51.nc'],['traj/AlkEthOH_c581.nc'],['traj/AlkEthOH_c100.nc'],['traj/AlkEthOH_c1161.nc'],['traj/AlkEthOH_c1266.nc'],['traj/AlkEthOH_c38.nc'],['traj/AlkEthOH_r118.nc'],['traj/AlkEthOH_r12.nc']]
#trajs = [['traj/AlkEthOH_r0.nc'],['traj/AlkEthOH_c581.nc']]
trajs = [[val] for val in traj]
smirkss = ['[#6X4:1]-[#6X4:2]-[#8X2H1:3]-[#1:4]']
trajstest = [[[] for i in K_extra] for _ in traj]
for ind,val in enumerate(trajs):
for ind1,val1 in enumerate(K_extra):
trajstest[ind][ind1] = [val[0][:-3]+'_'+smirkss[0]+'_k1'+str(val1[0][0])+'.nc']
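# Naming-convention note (added for clarity): each test trajectory path is built by
# stripping the '.nc' suffix from the sampled trajectory and appending
# '_<SMIRKS>_k1<new force constant>.nc', e.g. something like
# 'traj4ns/<mol>_[#6X4:1]-[#6X4:2]-[#8X2H1:3]-[#1:4]_k10.08.nc' for K_extra = 0.08.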
# Calculate energies at various parameters of interest
#energies, xyzn, system = new_param_energy(mol2en,traj, smirkss, N_k, K_k, paramtype, N_max)
#energiesnew, xyznnew, systemnew = new_param_energy(mol2en, traj, smirkss, N_k, K_extra, paramtype, N_max)
# Create lists to store data that will eventually be written in pandas df and saved as csv/json/pkl
molnamedf = []
smirksdf = []
obstypedf = []
paramtypedf = []
newparamval = []
N_subsampleddf = []
percentshiftdf = []
E_expectdf = []
dE_expectdf = []
dE_bootdf = []
E_stddevawaydf = []
Enew_expectdf = []
dEnew_expectdf = []
dEnew_bootdf = []
Enew_stddevawaydf = []
A_expectdf = []
dA_expectdf = []
dA_bootdf = []
A_stddevawaydf = []
Anew_sampleddf = []
Anew_expectdf = []
dAnew_expectdf = []
dAnew_bootdf = []
Anew_stddevawaydf = []
varAnew_bootdf = []
varAnew_sampdf = []
altvarAnew_bootdf = []
dvarAnew_bootdf = []
altdvarAnew_bootdf = []
varAnew_bootdf2 = []
altvarAnew_bootdf2 = []
dvarAnew_bootdf2 = []
altdvarAnew_bootdf2 = []
A_boot_new_sampdf = []
dA_boot_new_sampdf = []
# Return AtomDict needed to feed to ComputeBondsAnglesTorsions()
for ind,(i,j) in enumerate(zip(mol2,traj)):
AtomDict,lst_0,lst_1,lst_2 = get_small_mol_dict(i, [j])
mylist = [ii[1] for ii in lst_2[0]]
myset = set(mylist)
poplist = np.zeros([len(myset)],np.float64)
for b,k in enumerate(myset):
print "%s occurs %s times" %(k, mylist.count(k))
poplist[b] = mylist.count(k)
pctlist = 100.*poplist/sum(poplist)
pctdict = dict()
for c,k in enumerate(myset):
pctdict[k] = pctlist[c]
print '#################################################################################'
Atomdictmatches = []
for sublist in lst_2[0]:
if sublist[1] == smirkss[0]:
Atomdictmatches.append(sublist[0])
if not Atomdictmatches:
print 'No matches found'
continue
Atomdictmatchinds = []
for yy in Atomdictmatches:
for z,y in enumerate(AtomDict[obstype]):
if yy == str(AtomDict[obstype][z]):
Atomdictmatchinds.append(z)
obs_ind = Atomdictmatchinds[0]
# Calculate energies at various parameters of interest
for indparam,valparam in enumerate(K_extra):
energies, xyzn, system = new_param_energy(mol2en[ind],j, smirkss, N_k, K_k[indparam], paramtype, N_max, indkeep)
energiesnew, xyznnew, systemnew = new_param_energy(mol2en[ind],j, smirkss, N_k, K_extra[indparam], paramtype, N_max, indkeep)
xyznsampled = [[] for i in trajs[ind]]
A = np.zeros([K,N_max],np.float64)
for i,x in enumerate(trajs[ind]):
coord = readtraj(x,indkeep)[1]
xyznsampled[i] = coord
obs = ComputeBondsAnglesTorsions(coord,AtomDict['Bond'],AtomDict['Angle'],AtomDict['Torsion'])[0]# Compute angles and return array of angles
numatom = len(obs[0]) # get number of unique angles in molecule
timeser = [obs[:,d] for d in range(numatom)] # re-organize data into timeseries
A[i] = timeser[obs_ind] # pull out single angle in molecule for test case
xyznnewtest = [[] for i in trajstest[ind][indparam]]
Anewtest = np.zeros([K,N_max],np.float64)
for i,x in enumerate(trajstest[ind][indparam]):
coordtest = readtraj(x,indkeep)[1]
xyznnewtest[i] = coordtest
obstest = ComputeBondsAnglesTorsions(coordtest,AtomDict['Bond'],AtomDict['Angle'],AtomDict['Torsion'])[0]# Compute angles and return array of angles
numatomtest = len(obstest[0]) # get number of unique angles in molecule
timesertest = [obstest[:,d] for d in range(numatomtest)] # re-organize data into timeseries
Anewtest[i] = timesertest[obs_ind] # pull out single angle in molecule for test case
# Subsample timeseries and return new number of samples per state
A_sub, N_kA, xyzn_A_sub, indA = subsampletimeseries(A, xyznsampled, N_k)
En_sub, N_kEn, xyzn_En_sub, indEn = subsampletimeseries(energies[0], xyznsampled, N_k)
Ennew_sub, N_kEnnew, xyzn_Ennew_sub, indEnnew = subsampletimeseries(energiesnew[0], xyznsampled, N_k)
A_sub_test,N_kA_test,xyzn_A_test,indAtest = subsampletimeseries(Anewtest,xyznnewtest,N_k)
for a,b,c,d in zip(N_kA,N_kEn,N_kEnnew,N_kA_test):
N_kF = np.array([min(a,b,c,d)])
A_kn = np.zeros([sum(N_kF)],np.float64)
A_knnew = np.zeros([sum(N_kF)],np.float64)
count = 0
for x1,x2 in zip(A_sub,A_sub_test):
for y1,y2 in zip(x1,x2):
A_kn[count] = y1
A_knnew[count] = y2
count += 1
if count > (sum(N_kF)-1):
break
#--------------------------------------------------------------
        # Re-evaluate potentials at all subsampled coords and parameters
#--------------------------------------------------------------
verbose = False # suppress echos from OEtoolkit functions
ifs = oechem.oemolistream(mol2en[ind])
mol = oechem.OEMol()
# This uses parm@frosst atom types, so make sure to use the forcefield-flavor reader
flavor = oechem.OEIFlavor_Generic_Default | oechem.OEIFlavor_MOL2_Default | oechem.OEIFlavor_MOL2_Forcefield
ifs.SetFlavor( oechem.OEFormat_MOL2, flavor)
oechem.OEReadMolecule(ifs, mol )
# Perceive tripos types
oechem.OETriposAtomNames(mol)
# Load forcefield file
ffxml = get_data_filename('forcefield/smirff99Frosst.ffxml')
ff = ForceField(ffxml)
# Generate a topology
from smarty.forcefield import generateTopologyFromOEMol
topology = generateTopologyFromOEMol(mol)
# Re-calculate energies
E_kn = np.zeros([len(K_k[indparam]),sum(N_kEn)],np.float64)
for inds,s in enumerate(smirkss):
param = ff.getParameter(smirks=s)
for indss,vals in enumerate(K_k[indparam]):
count = 0
for p in paramtype:
for a,b in zip(vals,p):
param[b] = str(a)
ff.setParameter(param, smirks = s)
system = ff.createSystem(topology, [mol], verbose=verbose)
while count < sum(N_kEn):
for k_ind, pos in enumerate(xyzn_En_sub):
for i,a in enumerate(pos):
e = np.float(get_energy(system, a))
E_kn[indss,count] = e
count += 1
E_knnew = np.zeros([len(K_extra[indparam]),sum(N_kEn)],np.float64)
for inds,s in enumerate(smirkss):
param = ff.getParameter(smirks=s)
for indss,vals in enumerate(K_extra[indparam]):
count = 0
for p in paramtype:
for a,b in zip(vals,p):
param[b] = str(a)
ff.setParameter(param, smirks = s)
system = ff.createSystem(topology, [mol], verbose=verbose)
while count < sum(N_kEn):
for k_ind, pos in enumerate(xyzn_En_sub):
for i,a in enumerate(pos):
e = np.float(get_energy(system, a))
E_knnew[indss,count] = e
count += 1
# Post process energy distributions to find expectation values, analytical uncertainties and bootstrapped uncertainties
#T_from_file = read_col('StateData/data.csv',["Temperature (K)"],100)
Temp_k = 300.#T_from_file
T_av = 300.#np.average(Temp_k)
nBoots = 200
beta_k = 1 / (kB*T_av)
bbeta_k = 1 / (kB*Temp_k)
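        # Reduced-potential note (added for clarity): u_kn below is the dimensionless
        # potential u = beta*E with beta = 1/(kB*T); since T is fixed at 300 K here,
        # every reduced energy is simply the potential energy scaled by the same beta_k.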
#################################################################
# Compute reduced potentials
#################################################################
print "--Computing reduced potentials..."
# Initialize matrices for u_kn/observables matrices and expected value/uncertainty matrices
u_kn = np.zeros([K, sum(N_kF)], dtype=np.float64)
E_kn_samp = np.zeros([K,sum(N_kF)],np.float64)
u_knnew = np.zeros([K,sum(N_kF)], np.float64)
E_knnew_samp = np.zeros([K,sum(N_kF)], np.float64)
A_kn_samp = np.zeros([sum(N_kF)],np.float64)
A_knnew_samp = np.zeros([sum(N_kF)],np.float64)
A2_kn = np.zeros([sum(N_kF)],np.float64)
A2_knnew = np.zeros([sum(N_kF)],np.float64)
nBoots_work = nBoots + 1
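        # Bootstrap convention (added for clarity): nBoots_work = nBoots + 1 because
        # iteration n == 0 uses the original (unresampled) data, while iterations
        # 1..nBoots resample with replacement; uncertainties are then taken as the
        # standard deviation over columns 1:nBoots_work of the all*_expect arrays.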
allE_expect = np.zeros([K,nBoots_work], np.float64)
allA_expect = np.zeros([K,nBoots_work],np.float64)
allE2_expect = np.zeros([K,nBoots_work], np.float64)
dE_expect = np.zeros([K], np.float64)
allE_expectnew = np.zeros([K,nBoots_work], np.float64)
allE2_expectnew = np.zeros([K,nBoots_work], np.float64)
dE_expectnew = np.zeros([K], np.float64)
dA_expect = np.zeros([K],np.float64)
dA_expectnew = np.zeros([K],np.float64)
allvarA_expect_samp = np.zeros([K,nBoots_work],np.float64)
allA_expectnew = np.zeros([K,nBoots_work],np.float64)
allvarA_expectnew = np.zeros([K,nBoots_work],np.float64)
allaltvarA_expectnew = np.zeros([K,nBoots_work],np.float64)
allA_new_mean_samp = np.zeros([nBoots_work],np.float64)
# Begin bootstrapping loop
for n in range(nBoots_work):
if (n > 0):
print "Bootstrap: %d/%d" % (n,nBoots)
for k in range(K):
if N_kF[k] > 0:
if (n == 0):
booti = np.array(range(N_kF[k]))
else:
booti = np.random.randint(N_kF[k], size = N_kF[k])
E_kn_samp[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])] = E_kn[:,booti]
E_knnew_samp[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])] = E_knnew[:,booti]
A_kn_samp[sum(N_kF[0:k]):sum(N_kF[0:k+1])] = A_kn[booti]
A_knnew_samp[sum(N_kF[0:k]):sum(N_kF[0:k+1])] = A_knnew[booti]
for k in range(K):
u_kn[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])] = beta_k * E_kn_samp[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])]
u_knnew[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])] = beta_k * E_knnew_samp[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])]
A2_kn[sum(N_kF[0:k]):sum(N_kF[0:k+1])] = A_kn_samp[sum(N_kF[0:k]):sum(N_kF[0:k+1])]
A2_knnew[sum(N_kF[0:k]):sum(N_kF[0:k+1])] = A_knnew_samp[sum(N_kF[0:k]):sum(N_kF[0:k+1])]
############################################################################
# Initialize MBAR
############################################################################
# Initialize MBAR with Newton-Raphson
# Use Adaptive Method (Both Newton-Raphson and Self-Consistent, testing which is better)
if (n==0):
initial_f_k = None # start from zero
else:
initial_f_k = mbar.f_k # start from the previous final free energies to speed convergence
mbar = pymbar.MBAR(u_kn, N_kF, verbose=False, relative_tolerance=1e-12,initial_f_k=initial_f_k)
#------------------------------------------------------------------------
# Compute Expectations for energy and angle distributions
#------------------------------------------------------------------------
# print ""
# print "Computing Expectations for E..."
E_kn2 = u_kn # not a copy, we are going to write over it, but we don't need it any more.
E_knnew2 = u_knnew
for k in range(K):
E_kn2[k,:]*=beta_k**(-1) # get the 'unreduced' potential -- we can't take differences of reduced potentials because the beta is different.
E_knnew2[k,:]*=beta_k**(-1)
(E_expect, dE_expect) = mbar.computeExpectations(E_kn2,state_dependent = True)
(E_expectnew, dE_expectnew) = mbar.computeExpectations(E_knnew2,state_dependent = True)
(A_expect, dA_expect) = mbar.computeExpectations(A2_kn,state_dependent = False)
allE_expect[:,n] = E_expect[:]
allE_expectnew[:,n] = E_expectnew[:]
allA_expect[:,n] = A_expect[:]
# expectations for the differences, which we need for numerical derivatives
# To be used once the energy expectations are fixed
(DeltaE_expect, dDeltaE_expect) = mbar.computeExpectations(E_kn2,output='differences', state_dependent = False)
(DeltaE_expectnew, dDeltaE_expectnew) = mbar.computeExpectations(E_knnew2,output='differences', state_dependent = False)
# print "Computing Expectations for E^2..."
(E2_expect, dE2_expect) = mbar.computeExpectations(E_kn2**2, state_dependent = True)
allE2_expect[:,n] = E2_expect[:]
(A_expectnew, dA_expectnew) = mbar.computeExpectations(A2_kn,u_knnew,state_dependent=False)
allA_expectnew[:,n] = A_expectnew[:]
#Variance in sampled calculated observables (i.e. variance in bond length from state we're reweighting from) using MBAR A_expect
#for k in range(K):
# varA_expect_samp[k] = sum([(Ai - A_expect[k])**2 for Ai in A_kn_samp])/len(A_kn_samp)
#allvarA_expect_samp[:,n] = varA_expect_samp[:]
#Variance in unsampled calculated observables using MBAR
varA_mbar_feed = np.zeros([sum(N_kF)],np.float64)
for l in range(sum(N_kF)):
varA_mbar_feed[l] = ((A2_kn[l] - A_expect)**2)
(varA_expectnew,dvarA_expectnew) = mbar.computeExpectations(varA_mbar_feed,u_knnew,state_dependent=False)
allvarA_expectnew[:,n] = varA_expectnew[:]
#Check against calculating variance of A as <x^2> - <A>^2 (instead of <(x-A)^2>)
(A2_expectnew,dA2_expectnew) = mbar.computeExpectations(A2_kn**2,u_knnew,state_dependent=False)
altvarA_expectnew = (A2_expectnew[:] - A_expectnew[:]**2)
allaltvarA_expectnew[:,n] = altvarA_expectnew[:]
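            # Variance cross-check (added for clarity): varA_expectnew reweights
            # <(A - <A>)^2> to the new parameter state via MBAR, while altvarA_expectnew
            # uses the identity Var(A) = <A^2> - <A>^2; the two estimates should agree
            # to within their bootstrap uncertainties.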
#Record mean of sampled observable with bootstrap randomization to get error bars
allA_new_mean_samp[n] = np.mean(A2_knnew)
N_eff = mbar.computeEffectiveSampleNumber(verbose = True)
if nBoots > 0:
A_bootnew = np.zeros([K],dtype=np.float64)
E_bootnew = np.zeros([K],dtype=np.float64)
dE_boot = np.zeros([K],dtype=np.float64)
dE_bootnew = np.zeros([K],dtype=np.float64)
dA_boot = np.zeros([K],dtype=np.float64)
dA_bootnew = np.zeros([K],dtype=np.float64)
varA_bootnew = np.zeros([K],dtype=np.float64)
altvarA_bootnew = np.zeros([K],dtype=np.float64)
dvarA_bootnew = np.zeros([K],dtype=np.float64)
altdvarA_bootnew = np.zeros([K],dtype=np.float64)
A_bootnew_samp = np.mean(allA_new_mean_samp)
dA_bootnew_samp = np.std(allA_new_mean_samp)
for k in range(K):
dE_boot[k] = np.std(allE_expect[k,1:nBoots_work])
dE_bootnew[k] = np.std(allE_expectnew[k,1:nBoots_work])
dA_boot[k] = np.std(allA_expect[k,1:nBoots_work])
dA_bootnew[k] = np.std(allA_expectnew[k,1:nBoots_work])
varA_bootnew[k] = np.average(allvarA_expectnew[k,1:nBoots_work])
altvarA_bootnew[k] = np.average(allaltvarA_expectnew[k,1:nBoots_work])
dvarA_bootnew[k] = np.std(allvarA_expectnew[k,1:nBoots_work])
altdvarA_bootnew[k] = np.std(allaltvarA_expectnew[k,1:nBoots_work])
dA_bootnew = dA_expectnew
varA_bootnew = varA_expectnew
dvarA_bootnew = dvarA_expectnew
altvarA_bootnew = altvarA_expectnew
#altdvarA_bootnew = altdvarA_expectnew
#bins1 = int(np.log2(len(allA_expectnew[0])))
#bins2 = int(np.sqrt(len(allA_expectnew[0])))
#binsnum = int((bins1+bins2)/2)
#plt.figure()
#plt.hist(allA_expectnew[0], binsnum, normed=1, facecolor='green', alpha=0.75)
#plt.xlabel('Length (A)')
#plt.ylabel('Probability')
#plt.axis([min(allA_expectnew[0])-(bins[1]-bins[0]), max(allA_expectnew[0])-(bins[1]-bins[0]), 0, bins[1]-bins[0]])
#plt.grid(True)
#plt.savefig('checkdist.png')
#print "E_expect: %s dE_expect: %s dE_boot: %s \n" % (E_expect,dE_expect,dE_boot)
#print "E_expectnew: %s dE_expectnew: %s dE_bootnew: %s \n" % (E_expectnew,dE_expectnew,dE_bootnew)
#print "delta_E_expect: %s percent_delta_E_expect: %s \n" % (E_expectnew-E_expect, 100.*(E_expectnew-E_expect)/E_expect)
#print "A_expect: %s dA_expect: %s dA_boot: %s \n" % (A_expect,dA_expect,dA_boot)
#print "A_expectnew: %s dA_expectnew: %s dA_bootnew: %s \n" % (A_expectnew,dA_expectnew,dA_bootnew)
#print "varA_bootnew (variance of MBAR A from sampled population): %s sqrt of that: %s True value: %s \n" % (varA_bootnew,varA_bootnew**0.5,np.std(A_knnew_samp)**2,)
#print "The mean of the sampled series = %s \n" % ([np.average(A) for A in A_sub])
#print "The true sampled mean of the observable we're reweighting to = %s \n" % ([np.average(A) for A in A_sub_test])
#print "The mean of the energies corresponding to the sampled series = %s \n" % ([np.average(E) for E in E_kn])
#print "The mean of the energies corresponding to the unsampled series = %s \n" % ([np.average(E) for E in E_knnew])
# calculate standard deviations away that estimate is from sampled value
E_mean_samp = np.array([np.average(E) for E in E_kn])
E_mean_unsamp = np.array([np.average(E) for E in E_knnew])
A_mean_samp = np.array([np.average(A) for A in A_sub])
A_mean_test = np.array([np.average(A) for A in A_sub_test])
varAnew_samp = np.array([np.std(A)**2 for A in A_sub_test])
#print varAnew_samp
#print (dA_bootnew**2)*sum(N_kEn)
E_expect_mean = np.zeros([K],dtype=np.float64)
E_expect_meannew = np.zeros([K],dtype=np.float64)
A_expect_mean_samp = np.zeros([K],dtype=np.float64)
A_expect_mean_unsamp = np.zeros([K],dtype=np.float64)
for k in range(K):
E_expect_mean[k] = np.average(allE_expect[k,1:nBoots_work])
E_expect_meannew[k] = np.average(allE_expectnew[k,1:nBoots_work])
A_expect_mean_samp[k] = np.average(allA_expect[k,1:nBoots_work])
A_expect_mean_unsamp[k] = np.average(allA_expectnew[k,1:nBoots_work])
A_expect_mean_unsamp = A_expectnew
E_expect_meannew = E_expectnew
E_samp_stddevaway = np.zeros([K],np.float64)
E_unsamp_stddevaway = np.zeros([K],np.float64)
A_samp_stddevaway = np.zeros([K],np.float64)
A_test_stddevaway = np.zeros([K],np.float64)
for k in range(K):
E_samp_stddevaway[k] = np.abs(E_mean_samp[k]-E_expect_mean[k])/dE_expect
E_unsamp_stddevaway[k] = np.abs(E_mean_unsamp[k]-E_expect_meannew[k])/dE_expectnew
A_samp_stddevaway[k] = np.abs(A_mean_samp[k]-A_expect_mean_samp[k])/dA_expect
A_test_stddevaway[k] = np.abs(A_mean_test[k]-A_expect_mean_unsamp[k])/dA_expectnew
pctshft = 100.*((np.float(K_k[indparam]) - np.float(K_extra[indparam]))/np.float(K_k[indparam]))
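        # Worked example (illustrative, added for clarity): with the reference
        # K_k = 1.090 and a perturbed K_extra = 0.08, pctshft = 100*(1.090-0.08)/1.090,
        # i.e. roughly a 92.7% shift in the torsion force constant.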
#print "Standard deviations away from true sampled observables for E_expect: %s E_expectnew: %s A_expect: %s A_expect_unsamp: %s" % (E_samp_stddevaway,E_unsamp_stddevaway,A_samp_stddevaway,A_test_stddevaway)
#print "Percent shift = %s \n" % pctshft
#print "Percent of molecule that is %s = %s \n" % (smirkss[0],pctdict[smirkss[0]])
allvarA_expectnew2 = np.zeros([K,nBoots_work],np.float64)
allaltvarA_expectnew2 = np.zeros([K,nBoots_work],np.float64)
for n in range(nBoots_work):
if (n > 0):
print "Bootstrap: %d/%d" % (n,nBoots)
for k in range(K):
if N_kF[k] > 0:
if (n == 0):
booti = np.array(range(N_kF[k]))
else:
booti = np.random.randint(N_kF[k], size = N_kF[k])
E_kn_samp[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])] = E_kn[:,booti]
E_knnew_samp[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])] = E_knnew[:,booti]
A_kn_samp[sum(N_kF[0:k]):sum(N_kF[0:k+1])] = A_kn[booti]
A_knnew_samp[sum(N_kF[0:k]):sum(N_kF[0:k+1])] = A_knnew[booti]
for k in range(K):
u_kn[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])] = beta_k * E_kn_samp[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])]
u_knnew[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])] = beta_k * E_knnew_samp[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])]
A2_kn[sum(N_kF[0:k]):sum(N_kF[0:k+1])] = A_kn_samp[sum(N_kF[0:k]):sum(N_kF[0:k+1])]
A2_knnew[sum(N_kF[0:k]):sum(N_kF[0:k+1])] = A_knnew_samp[sum(N_kF[0:k]):sum(N_kF[0:k+1])]
############################################################################
# Initialize MBAR
############################################################################
# Initialize MBAR with Newton-Raphson
# Use Adaptive Method (Both Newton-Raphson and Self-Consistent, testing which is better)
if (n==0):
initial_f_k = None # start from zero
else:
initial_f_k = mbar.f_k # start from the previous final free energies to speed convergence
mbar = pymbar.MBAR(u_kn, N_kF, verbose=False, relative_tolerance=1e-12,initial_f_k=initial_f_k)
#Variance in unsampled calculated observables using MBAR
varA_mbar_feed2 = np.zeros([sum(N_kF)],np.float64)
for l in range(sum(N_kF)):
varA_mbar_feed2[l] = ((A2_kn[l] - A_expect_mean_unsamp[0])**2)
(varA_expectnew2,dvarA_expectnew2) = mbar.computeExpectations(varA_mbar_feed2,u_knnew,state_dependent=False)
allvarA_expectnew2[:,n] = varA_expectnew2[:]
#Check against calculating variance of A as <x^2> - <A>^2 (instead of <(x-A)^2>)
(A_expectnew2,dA_expectnew2) = mbar.computeExpectations(A2_kn,u_knnew,state_dependent=False)
(A2_expectnew2,dA2_expectnew2) = mbar.computeExpectations(A2_kn**2,u_knnew,state_dependent=False)
altvarA_expectnew2 = (A2_expectnew2[:] - A_expectnew2[:]**2)
allaltvarA_expectnew2[:,n] = altvarA_expectnew2[:]
if nBoots > 0:
varA_bootnew2 = np.zeros([K],dtype=np.float64)
altvarA_bootnew2 = np.zeros([K],dtype=np.float64)
dvarA_bootnew2 = np.zeros([K],dtype=np.float64)
altdvarA_bootnew2 = np.zeros([K],dtype=np.float64)
for k in range(K):
varA_bootnew2[k] = np.average(allvarA_expectnew2[k,1:nBoots_work])
altvarA_bootnew2[k] = np.average(allaltvarA_expectnew2[k,1:nBoots_work])
                dvarA_bootnew2[k] = np.std(allvarA_expectnew2[k,1:nBoots_work])
                altdvarA_bootnew2[k] = np.std(allaltvarA_expectnew2[k,1:nBoots_work])
varA_bootnew2 = varA_expectnew2
dvarA_bootnew2 = dvarA_expectnew2
            altvarA_bootnew2 = altvarA_expectnew2
#altdvarA_bootnew2 = altdvarA_expectnew2
#print allvarA_expectnew2
#print np.var(allA_expectnew)
bins1 = int(np.log2(len(allvarA_expectnew2[0])))
bins2 = int(np.sqrt(len(allvarA_expectnew2[0])))
binsnum = int((bins1+bins2)/2)
plt.figure()
plt.hist(allvarA_expectnew2[0], binsnum, normed=1, facecolor='green', alpha=0.75)
plt.xlabel('Length^2 (A^2)')
plt.ylabel('Probability')
#plt.axis([min(allA_expectnew[0])-(bins[1]-bins[0]), max(allA_expectnew[0])-(bins[1]-bins[0]), 0, bins[1]-bins[0]])
plt.grid(True)
plt.savefig('checkdist2.png')
#sys.exit()
print '###############################################################################'
molnamedf.append(mol2[ind])
smirksdf.append(smirkss[0])
obstypedf.append(obstype)
paramtypedf.append(paramtype[0])
newparamval.append(K_extra[indparam])
percentshiftdf.append(pctshft)
N_subsampleddf.append(N_kF)
E_expectdf.append(E_expect_mean)
dE_expectdf.append(dE_expect)
dE_bootdf.append(dE_boot)
E_stddevawaydf.append(E_samp_stddevaway)
Enew_expectdf.append(E_expect_meannew)
dEnew_expectdf.append(dE_expectnew)
dEnew_bootdf.append(dE_bootnew)
Enew_stddevawaydf.append(E_unsamp_stddevaway)
A_expectdf.append(A_expect_mean_samp)
dA_expectdf.append(dA_expect)
dA_bootdf.append(dA_boot)
A_stddevawaydf.append(A_samp_stddevaway)
Anew_sampleddf.append(A_mean_test)
Anew_expectdf.append(A_expect_mean_unsamp)
dAnew_expectdf.append(dA_expectnew)
dAnew_bootdf.append(dA_bootnew)
Anew_stddevawaydf.append(A_test_stddevaway)
varAnew_sampdf.append(varAnew_samp)
varAnew_bootdf.append(varA_bootnew)
altvarAnew_bootdf.append(altvarA_bootnew)
dvarAnew_bootdf.append(dvarA_bootnew)
altdvarAnew_bootdf.append(altdvarA_bootnew)
varAnew_bootdf2.append(varA_bootnew2)
altvarAnew_bootdf2.append(altvarA_bootnew2)
dvarAnew_bootdf2.append(dvarA_bootnew2)
altdvarAnew_bootdf2.append(altdvarA_bootnew2)
A_boot_new_sampdf.append(A_bootnew_samp)
dA_boot_new_sampdf.append(dA_bootnew_samp)
print("NEXT LOOP")
########################################################################
df = pd.DataFrame.from_dict({'mol_name':[value for value in molnamedf],
'smirks':[value for value in smirksdf],
'obs_type':[value for value in obstypedf],
'param_type':[value for value in paramtypedf],
'new_param':[value for value in newparamval],
'percent_shift':[value for value in percentshiftdf],
'N_subsampled':[value for value in N_subsampleddf],
'E_expect':[value for value in E_expectdf],
'dE_expect':[value for value in dE_expectdf],
'dE_boot':[value for value in dE_bootdf],
'E_stddevaway':[value for value in E_stddevawaydf],
'Enew_expect':[value for value in Enew_expectdf],
'dEnew_expect':[value for value in dEnew_expectdf],
'dEnew_boot':[value for value in dEnew_bootdf],
'Enew_stddevaway':[value for value in Enew_stddevawaydf],
'A_expect':[value for value in A_expectdf],
'dA_expect':[value for value in dA_expectdf],
'dA_boot':[value for value in dA_bootdf],
'A_stddevaway':[value for value in A_stddevawaydf],
'Anew_sampled':[value for value in Anew_sampleddf],
'Anew_expect':[value for value in Anew_expectdf],
'dAnew_expect':[value for value in dAnew_expectdf],
'dAnew_boot':[value for value in dAnew_bootdf],
'Anew_stddevaway':[value for value in Anew_stddevawaydf],
'varAnew_samp':[value for value in varAnew_sampdf],
'varAnew_boot':[value for value in varAnew_bootdf],
'altvarAnew_boot':[value for value in altvarAnew_bootdf],
'dvarAnew_boot':[value for value in dvarAnew_bootdf],
'altdvarAnew_boot':[value for value in altdvarAnew_bootdf],
'varAnew_boot2':[value for value in varAnew_bootdf2],
'altvarAnew_boot2':[value for value in altvarAnew_bootdf2],
'dvarAnew_boot2':[value for value in dvarAnew_bootdf2],
'altdvarAnew_boot2':[value for value in altdvarAnew_bootdf2],
'A_boot_new_samp':[value for value in A_boot_new_sampdf],
'dA_boot_new_samp':[value for value in dA_boot_new_sampdf]})
df.to_csv('mbar_analyses/mbar_analysis_'+sys.argv[1]+'_'+smirkss[0]+'_'+paramtype[0][0]+'_'+obstype+'.csv',sep=';')
df.to_pickle('mbar_analyses/mbar_analysis_'+sys.argv[1]+'_'+smirkss[0]+'_'+paramtype[0][0]+'_'+obstype+'.pkl')
|
bmanubay/open-forcefield-tools
|
single-molecule-property-generation/manipulateparameters.py
|
Python
|
mit
| 59,104
|
[
"NetCDF",
"OpenMM"
] |
fdd6dd44499b83a681f5dff46c278b428af827864edea23d5fedf54bd15509e3
|
# pylint: disable=arguments-differ
""" Models for the shopping cart and assorted purchase types """
import csv
import json
import logging
import smtplib
from collections import namedtuple
from datetime import datetime, timedelta
from decimal import Decimal
import pytz
import six
from boto.exception import BotoServerError # this is a super-class of SESError and catches connection errors
from config_models.models import ConfigurationModel
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mail
from django.core.mail.message import EmailMessage
from django.db import models, transaction
from django.db.models import Count, F, Q, Sum
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.urls import reverse
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from model_utils.managers import InheritanceManager
from model_utils.models import TimeStampedModel
from opaque_keys.edx.django.models import CourseKeyField
from six import text_type
from six.moves import range
from course_modes.models import CourseMode
from lms.djangoapps.courseware.courses import get_course_by_id
from edxmako.shortcuts import render_to_string
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangolib.markup import HTML, Text
from student.models import CourseEnrollment, EnrollStatusChange
from student.signals import UNENROLL_DONE
from track import segment
from util.query import use_read_replica_if_available
from xmodule.modulestore.django import modulestore
from .exceptions import (
AlreadyEnrolledInCourseException,
CourseDoesNotExistException,
InvalidCartItem,
InvalidStatusToRetire,
ItemAlreadyInCartException,
ItemNotFoundInCartException,
MultipleCouponsNotAllowedException,
PurchasedCallbackException,
UnexpectedOrderItemStatus
)
log = logging.getLogger("shoppingcart")
ORDER_STATUSES = (
# The user is selecting what he/she wants to purchase.
(u'cart', u'cart'),
# The user has been sent to the external payment processor.
# At this point, the order should NOT be modified.
# If the user returns to the payment flow, he/she will start a new order.
(u'paying', u'paying'),
# The user has successfully purchased the items in the order.
(u'purchased', u'purchased'),
# The user's order has been refunded.
(u'refunded', u'refunded'),
# The user's order went through, but the order was erroneously left
# in 'cart'.
(u'defunct-cart', u'defunct-cart'),
# The user's order went through, but the order was erroneously left
# in 'paying'.
(u'defunct-paying', u'defunct-paying'),
)
# maps order statuses to their defunct states
ORDER_STATUS_MAP = {
u'cart': u'defunct-cart',
u'paying': u'defunct-paying',
}
# we need a tuple to represent the primary key of various OrderItem subclasses
OrderItemSubclassPK = namedtuple('OrderItemSubclassPK', ['cls', 'pk'])
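# Illustrative example (added for clarity, not part of the original module): a value of
# this tuple might look like OrderItemSubclassPK(cls=PaidCourseRegistration, pk=42),
# pinning down both the concrete OrderItem subclass and its primary key.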
class OrderTypes(object):
"""
    This class specifies purchase OrderTypes.
"""
PERSONAL = u'personal'
BUSINESS = u'business'
ORDER_TYPES = (
(PERSONAL, u'personal'),
(BUSINESS, u'business'),
)
class Order(models.Model):
"""
This is the model for an order. Before purchase, an Order and its related OrderItems are used
as the shopping cart.
FOR ANY USER, THERE SHOULD ONLY EVER BE ZERO OR ONE ORDER WITH STATUS='cart'.
.. pii: Contains many PII fields in an app edx.org does not currently use. "other" data is payment information.
.. pii_types: name, location, email_address, other
.. pii_retirement: retained
"""
class Meta(object):
app_label = "shoppingcart"
user = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE)
currency = models.CharField(default=u"usd", max_length=8) # lower case ISO currency codes
status = models.CharField(max_length=32, default=u'cart', choices=ORDER_STATUSES)
purchase_time = models.DateTimeField(null=True, blank=True)
refunded_time = models.DateTimeField(null=True, blank=True)
# Now we store data needed to generate a reasonable receipt
# These fields only make sense after the purchase
bill_to_first = models.CharField(max_length=64, blank=True)
bill_to_last = models.CharField(max_length=64, blank=True)
bill_to_street1 = models.CharField(max_length=128, blank=True)
bill_to_street2 = models.CharField(max_length=128, blank=True)
bill_to_city = models.CharField(max_length=64, blank=True)
bill_to_state = models.CharField(max_length=8, blank=True)
bill_to_postalcode = models.CharField(max_length=16, blank=True)
bill_to_country = models.CharField(max_length=64, blank=True)
bill_to_ccnum = models.CharField(max_length=8, blank=True) # last 4 digits
bill_to_cardtype = models.CharField(max_length=32, blank=True)
# a JSON dump of the CC processor response, for completeness
processor_reply_dump = models.TextField(blank=True)
# bulk purchase registration code workflow billing details
company_name = models.CharField(max_length=255, null=True, blank=True)
company_contact_name = models.CharField(max_length=255, null=True, blank=True)
company_contact_email = models.CharField(max_length=255, null=True, blank=True)
recipient_name = models.CharField(max_length=255, null=True, blank=True)
recipient_email = models.CharField(max_length=255, null=True, blank=True)
customer_reference_number = models.CharField(max_length=63, null=True, blank=True)
order_type = models.CharField(max_length=32, default=u'personal', choices=OrderTypes.ORDER_TYPES)
@classmethod
def get_cart_for_user(cls, user):
"""
Always use this to preserve the property that at most 1 order per user has status = 'cart'
"""
# find the newest element in the db
try:
cart_order = cls.objects.filter(user=user, status='cart').order_by('-id')[:1].get()
except ObjectDoesNotExist:
# if nothing exists in the database, create a new cart
cart_order, _created = cls.objects.get_or_create(user=user, status='cart')
return cart_order
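    # Hedged usage sketch (added for clarity): view code would typically call
    #   cart = Order.get_cart_for_user(request.user)
    # which returns the user's existing 'cart' order or lazily creates an empty one.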
@classmethod
def does_user_have_cart(cls, user):
"""
Returns a boolean whether a shopping cart (Order) exists for the specified user
"""
return cls.objects.filter(user=user, status='cart').exists()
@classmethod
def user_cart_has_items(cls, user, item_types=None):
"""
        Returns True if the user (anonymous user ok) has
        a cart with items in it (which means it should be displayed).
        If an item_type is passed in, then we check to see if the cart has at least one of
        those types of OrderItems.
"""
if not user.is_authenticated:
return False
cart = cls.get_cart_for_user(user)
if not item_types:
# check to see if the cart has at least some item in it
return cart.has_items()
else:
# if the caller is explicitly asking to check for particular types
for item_type in item_types:
if cart.has_items(item_type):
return True
return False
@classmethod
def remove_cart_item_from_order(cls, item, user):
"""
Removes the item from the cart if the item.order.status == 'cart'.
Also removes any code redemption associated with the order_item
"""
if item.order.status == 'cart':
log.info(u"order item %s removed for user %s", str(item.id), user)
item.delete()
# remove any redemption entry associated with the item
CouponRedemption.remove_code_redemption_from_item(item, user)
@property
def total_cost(self):
"""
Return the total cost of the cart. If the order has been purchased, returns total of
all purchased and not refunded items.
"""
return sum(i.line_cost for i in self.orderitem_set.filter(status=self.status))
def has_items(self, item_type=None):
"""
Does the cart have any items in it?
If an item_type is passed in then we check to see if there are any items of that class type
"""
if not item_type:
return self.orderitem_set.exists()
else:
items = self.orderitem_set.all().select_subclasses()
for item in items:
if isinstance(item, item_type):
return True
return False
def reset_cart_items_prices(self):
"""
Reset the items price state in the user cart
"""
for item in self.orderitem_set.all():
if item.is_discounted:
item.unit_cost = item.list_price
item.save()
def clear(self):
"""
Clear out all the items in the cart
"""
self.orderitem_set.all().delete()
@transaction.atomic
def start_purchase(self):
"""
Start the purchase process. This will set the order status to "paying",
at which point it should no longer be modified.
Future calls to `Order.get_cart_for_user()` will filter out orders with
status "paying", effectively creating a new (empty) cart.
"""
if self.status == 'cart':
self.status = 'paying'
self.save()
for item in OrderItem.objects.filter(order=self).select_subclasses():
item.start_purchase()
def update_order_type(self):
"""
        Update the order type. This method will inspect the quantity associated with each OrderItem.
In the application, it is implied that when qty > 1, then the user is to purchase
'RegistrationCodes' which are randomly generated strings that users can distribute to
others in order for them to enroll in paywalled courses.
The UI/UX may change in the future to make the switching between PaidCourseRegistration
and CourseRegCodeItems a more explicit UI gesture from the purchaser
"""
cart_items = self.orderitem_set.all()
is_order_type_business = False
for cart_item in cart_items:
if cart_item.qty > 1:
is_order_type_business = True
items_to_delete = []
old_to_new_id_map = []
if is_order_type_business:
for cart_item in cart_items:
if hasattr(cart_item, 'paidcourseregistration'):
course_reg_code_item = CourseRegCodeItem.add_to_order(
self, cart_item.paidcourseregistration.course_id, cart_item.qty,
)
# update the discounted prices if coupon redemption applied
course_reg_code_item.list_price = cart_item.list_price
course_reg_code_item.unit_cost = cart_item.unit_cost
course_reg_code_item.save()
items_to_delete.append(cart_item)
old_to_new_id_map.append({"oldId": cart_item.id, "newId": course_reg_code_item.id})
else:
for cart_item in cart_items:
if hasattr(cart_item, 'courseregcodeitem'):
paid_course_registration = PaidCourseRegistration.add_to_order(
self, cart_item.courseregcodeitem.course_id,
)
# update the discounted prices if coupon redemption applied
paid_course_registration.list_price = cart_item.list_price
paid_course_registration.unit_cost = cart_item.unit_cost
paid_course_registration.save()
items_to_delete.append(cart_item)
old_to_new_id_map.append({"oldId": cart_item.id, "newId": paid_course_registration.id})
for item in items_to_delete:
item.delete()
self.order_type = OrderTypes.BUSINESS if is_order_type_business else OrderTypes.PERSONAL
self.save()
return old_to_new_id_map
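    # Return-value note (added for clarity): update_order_type returns a list of
    # {"oldId": <deleted item id>, "newId": <replacement item id>} dicts, presumably so
    # callers can remap references to the swapped cart items.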
def generate_registration_codes_csv(self, orderitems, site_name):
"""
this function generates the csv file
"""
course_names = []
csv_file = six.StringIO()
csv_writer = csv.writer(csv_file)
csv_writer.writerow(['Course Name', 'Registration Code', 'URL'])
for item in orderitems:
course_id = item.course_id
course = get_course_by_id(item.course_id, depth=0)
registration_codes = CourseRegistrationCode.objects.filter(course_id=course_id, order=self)
course_names.append(course.display_name)
for registration_code in registration_codes:
redemption_url = reverse('register_code_redemption', args=[registration_code.code])
url = '{base_url}{redemption_url}'.format(base_url=site_name, redemption_url=redemption_url)
csv_writer.writerow([six.text_type(course.display_name).encode("utf-8"), registration_code.code, url])
return csv_file, course_names
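    # CSV layout sketch (added for clarity): one header row
    # ['Course Name', 'Registration Code', 'URL'] followed by one row per registration
    # code, where URL is site_name + reverse('register_code_redemption', args=[code]).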
def send_confirmation_emails(self, orderitems, is_order_type_business, csv_file, site_name, course_names):
"""
send confirmation e-mail
"""
recipient_list = [(self.user.username, self.user.email, 'user')]
if self.company_contact_email:
recipient_list.append((self.company_contact_name, self.company_contact_email, 'company_contact'))
joined_course_names = ""
if self.recipient_email:
recipient_list.append((self.recipient_name, self.recipient_email, 'email_recipient'))
joined_course_names = " " + ", ".join(course_names)
if not is_order_type_business:
subject = _("Order Payment Confirmation")
else:
subject = _(u'Confirmation and Registration Codes for the following courses: {course_name_list}').format(
course_name_list=joined_course_names
)
dashboard_url = '{base_url}{dashboard}'.format(
base_url=site_name,
dashboard=reverse('dashboard')
)
try:
from_address = configuration_helpers.get_value(
'email_from_address',
settings.PAYMENT_SUPPORT_EMAIL
)
# Send a unique email for each recipient. Don't put all email addresses in a single email.
for recipient in recipient_list:
message = render_to_string(
'emails/business_order_confirmation_email.txt' if is_order_type_business else 'emails/order_confirmation_email.txt',
{
'order': self,
'recipient_name': recipient[0],
'recipient_type': recipient[2],
'site_name': site_name,
'order_items': orderitems,
'course_names': ", ".join(course_names),
'dashboard_url': dashboard_url,
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'order_placed_by': u'{username} ({email})'.format(
username=self.user.username, email=self.user.email
),
'has_billing_info': settings.FEATURES['STORE_BILLING_INFO'],
'platform_name': configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME),
'payment_support_email': configuration_helpers.get_value(
'payment_support_email', settings.PAYMENT_SUPPORT_EMAIL,
),
'payment_email_signature': configuration_helpers.get_value('payment_email_signature'),
}
)
email = EmailMessage(
subject=subject,
body=message,
from_email=from_address,
to=[recipient[1]]
)
# Only the business order is HTML formatted. A single seat order confirmation is plain text.
if is_order_type_business:
email.content_subtype = "html"
if csv_file:
email.attach(u'RegistrationCodesRedemptionUrls.csv', csv_file.getvalue(), 'text/csv')
email.send()
except (smtplib.SMTPException, BotoServerError): # sadly need to handle diff. mail backends individually
log.error(u'Failed sending confirmation e-mail for order %d', self.id)
def purchase(self, first='', last='', street1='', street2='', city='', state='', postalcode='',
country='', ccnum='', cardtype='', processor_reply_dump=''):
"""
Call to mark this order as purchased. Iterates through its OrderItems and calls
their purchased_callback
`first` - first name of person billed (e.g. John)
`last` - last name of person billed (e.g. Smith)
`street1` - first line of a street address of the billing address (e.g. 11 Cambridge Center)
`street2` - second line of a street address of the billing address (e.g. Suite 101)
`city` - city of the billing address (e.g. Cambridge)
`state` - code of the state, province, or territory of the billing address (e.g. MA)
`postalcode` - postal code of the billing address (e.g. 02142)
`country` - country code of the billing address (e.g. US)
`ccnum` - last 4 digits of the credit card number of the credit card billed (e.g. 1111)
`cardtype` - 3-digit code representing the card type used (e.g. 001)
`processor_reply_dump` - all the parameters returned by the processor
"""
if self.status == 'purchased':
log.error(
u"`purchase` method called on order {}, but order is already purchased.".format(self.id)
)
return
self.status = 'purchased'
self.purchase_time = datetime.now(pytz.utc)
self.bill_to_first = first
self.bill_to_last = last
self.bill_to_city = city
self.bill_to_state = state
self.bill_to_country = country
self.bill_to_postalcode = postalcode
if settings.FEATURES['STORE_BILLING_INFO']:
self.bill_to_street1 = street1
self.bill_to_street2 = street2
self.bill_to_ccnum = ccnum
self.bill_to_cardtype = cardtype
self.processor_reply_dump = processor_reply_dump
# save these changes on the order, then we can tell when we are in an
# inconsistent state
self.save()
# this should return all of the objects with the correct types of the
# subclasses
orderitems = OrderItem.objects.filter(order=self).select_subclasses()
site_name = configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME)
if self.order_type == OrderTypes.BUSINESS:
self.update_order_type()
for item in orderitems:
item.purchase_item()
csv_file = None
course_names = []
if self.order_type == OrderTypes.BUSINESS:
#
# Generate the CSV file that contains all of the RegistrationCodes that have already been
# generated when the purchase has transacted
#
csv_file, course_names = self.generate_registration_codes_csv(orderitems, site_name)
try:
self.send_confirmation_emails(
orderitems, self.order_type == OrderTypes.BUSINESS,
csv_file, site_name, course_names
)
except Exception: # pylint: disable=broad-except
# Catch all exceptions here, since the Django view implicitly
# wraps this in a transaction. If the order completes successfully,
# we don't want to roll back just because we couldn't send
# the confirmation email.
log.exception('Error occurred while sending payment confirmation email')
self._emit_order_event('Completed Order', orderitems)
def refund(self):
"""
Refund the given order. As of right now, this just marks the order as refunded.
"""
self.status = 'refunded'
self.save()
orderitems = OrderItem.objects.filter(order=self).select_subclasses()
self._emit_order_event('Refunded Order', orderitems)
def _emit_order_event(self, event_name, orderitems):
"""
Emit an analytics event with the given name for this Order. Will iterate over all associated
OrderItems and add them as products in the event as well.
"""
try:
segment.track(self.user.id, event_name, {
'orderId': self.id,
'total': str(self.total_cost),
# For Rockerbox integration, we need a field named revenue since they cannot parse a field named total.
# TODO: DE-1188: Remove / move Rockerbox integration code.
'revenue': str(self.total_cost),
'currency': self.currency,
'products': [item.analytics_data() for item in orderitems]
})
except Exception: # pylint: disable=broad-except
# Capturing all exceptions thrown while tracking analytics events. We do not want
# an operation to fail because of an analytics event, so we will capture these
# errors in the logs.
log.exception(
u'Unable to emit {event} event for user {user} and order {order}'.format(
event=event_name, user=self.user.id, order=self.id)
)
def add_billing_details(self, company_name='', company_contact_name='', company_contact_email='', recipient_name='',
recipient_email='', customer_reference_number=''):
"""
This function is called after the user selects a purchase type of "Business" and
is asked to enter the optional billing details. The billing details are updated
for that order.
company_name - Name of purchasing organization
company_contact_name - Name of the key contact at the company the sale was made to
company_contact_email - Email of the key contact at the company the sale was made to
recipient_name - Name of the company should the invoice be sent to
recipient_email - Email of the company should the invoice be sent to
customer_reference_number - purchase order number of the organization associated with this Order
"""
self.company_name = company_name
self.company_contact_name = company_contact_name
self.company_contact_email = company_contact_email
self.recipient_name = recipient_name
self.recipient_email = recipient_email
self.customer_reference_number = customer_reference_number
self.save()
def generate_receipt_instructions(self):
"""
Call to generate specific instructions for each item in the order. This gets displayed on the receipt
page, typically. Instructions are something like "visit your dashboard to see your new courses".
This will return two things in a pair. The first will be a dict with keys=OrderItemSubclassPK corresponding
to an OrderItem and values=a set of html instructions they generate. The second will be a set of de-duped
html instructions
"""
instruction_set = set([]) # heh. not ia32 or alpha or sparc
instruction_dict = {}
order_items = OrderItem.objects.filter(order=self).select_subclasses()
for item in order_items:
item_pk_with_subclass, set_of_html = item.generate_receipt_instructions()
instruction_dict[item_pk_with_subclass] = set_of_html
instruction_set.update(set_of_html)
return instruction_dict, instruction_set
def retire(self):
"""
Method to "retire" orders that have gone through to the payment service
but have (erroneously) not had their statuses updated.
This method only works on orders that satisfy the following conditions:
1) the order status is either "cart" or "paying" (otherwise we raise
an InvalidStatusToRetire error)
2) the order's order item's statuses match the order's status (otherwise
we throw an UnexpectedOrderItemStatus error)
"""
# if an order is already retired, no-op:
if self.status in list(ORDER_STATUS_MAP.values()):
return
if self.status not in list(ORDER_STATUS_MAP.keys()):
raise InvalidStatusToRetire(
u"order status {order_status} is not 'paying' or 'cart'".format(
order_status=self.status
)
)
for item in self.orderitem_set.all():
if item.status != self.status:
raise UnexpectedOrderItemStatus(
"order_item status is different from order status"
)
self.status = ORDER_STATUS_MAP[self.status]
self.save()
for item in self.orderitem_set.all():
item.retire()
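    # Retire-flow example (added for clarity): an order erroneously left in 'paying'
    # becomes 'defunct-paying' (per ORDER_STATUS_MAP) and each of its OrderItems is
    # retired the same way; orders already in a defunct state are a no-op.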
def find_item_by_course_id(self, course_id):
"""
course_id: Course id of the item to find
Returns OrderItem from the Order given a course_id
Raises exception ItemNotFoundException when the item
having the given course_id is not present in the cart
"""
cart_items = OrderItem.objects.filter(order=self).select_subclasses()
found_items = []
for item in cart_items:
if getattr(item, 'course_id', None):
if item.course_id == course_id:
found_items.append(item)
if not found_items:
raise ItemNotFoundInCartException
return found_items
class OrderItem(TimeStampedModel):
"""
This is the basic interface for order items.
Order items are line items that fill up the shopping carts and orders.
Each implementation of OrderItem should provide its own purchased_callback as
a method.
.. no_pii:
"""
class Meta(object):
app_label = "shoppingcart"
base_manager_name = 'objects'
objects = InheritanceManager()
order = models.ForeignKey(Order, db_index=True, on_delete=models.CASCADE)
# this is denormalized, but convenient for SQL queries for reports, etc. user should always be = order.user
user = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE)
# this is denormalized, but convenient for SQL queries for reports, etc. status should always be = order.status
status = models.CharField(max_length=32, default=u'cart', choices=ORDER_STATUSES, db_index=True)
qty = models.IntegerField(default=1)
unit_cost = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
list_price = models.DecimalField(decimal_places=2, max_digits=30, null=True)
line_desc = models.CharField(default=u"Misc. Item", max_length=1024)
currency = models.CharField(default=u"usd", max_length=8) # lower case ISO currency codes
fulfilled_time = models.DateTimeField(null=True, db_index=True)
refund_requested_time = models.DateTimeField(null=True, db_index=True)
service_fee = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
# general purpose field, not user-visible. Used for reporting
report_comments = models.TextField(default=u"")
@property
def line_cost(self):
""" Return the total cost of this OrderItem """
return self.qty * self.unit_cost
@line_cost.setter
def line_cost(self, value):
"""
Django requires there be a setter for this, but it is not
necessary for the way we currently use it. Raising errors
here will cause a lot of issues and these should not be
mutable after construction, so for now we just eat this.
"""
pass
@classmethod
def add_to_order(cls, order, *args, **kwargs):
"""
A suggested convenience function for subclasses.
NOTE: This does not add anything to the cart. That is left up to the
subclasses to implement for themselves
"""
# this is a validation step to verify that the currency of the item we
# are adding is the same as the currency of the order we are adding it
# to
currency = kwargs.get('currency', 'usd')
if order.currency != currency and order.orderitem_set.exists():
raise InvalidCartItem(_("Trying to add a different currency into the cart"))
@transaction.atomic
def purchase_item(self):
"""
This is basically a wrapper around purchased_callback that handles
modifying the OrderItem itself
"""
self.purchased_callback()
self.status = 'purchased'
self.fulfilled_time = datetime.now(pytz.utc)
self.save()
def start_purchase(self):
"""
Start the purchase process. This will set the order item status to "paying",
at which point it should no longer be modified.
"""
self.status = 'paying'
self.save()
def purchased_callback(self):
"""
This is called on each inventory item in the shopping cart when the
purchase goes through.
"""
raise NotImplementedError
def generate_receipt_instructions(self):
"""
This is called on each item in a purchased order to generate receipt instructions.
This should return a list of `ReceiptInstruction`s in HTML string
Default implementation is to return an empty set
"""
return self.pk_with_subclass, set([])
@property
def pk_with_subclass(self):
"""
Returns a named tuple that annotates the pk of this instance with its class, to fully represent
a pk of a subclass (inclusive) of OrderItem
"""
return OrderItemSubclassPK(type(self), self.pk)
@property
def is_discounted(self):
"""
        Returns True if a discount coupon has been applied to the OrderItem and False otherwise.
        Earlier, OrderItems were stored with an empty list_price if a discount had not been applied.
        Now we consider the item non-discounted if list_price is None or list_price == unit_cost; an
        item is discounted only when list_price is set and differs from unit_cost.
This should work with both new and old records.
"""
return self.list_price and self.list_price != self.unit_cost
def get_list_price(self):
"""
Returns the unit_cost if no discount has been applied, or the list_price if it is defined.
"""
return self.list_price if self.list_price else self.unit_cost
@property
def single_item_receipt_template(self):
"""
The template that should be used when there's only one item in the order
"""
return 'shoppingcart/receipt.html'
@property
def single_item_receipt_context(self):
"""
Extra variables needed to render the template specified in
`single_item_receipt_template`
"""
return {}
def additional_instruction_text(self, **kwargs): # pylint: disable=unused-argument
"""
Individual instructions for this order item.
Currently, only used for emails.
"""
return ''
@property
def pdf_receipt_display_name(self):
"""
How to display this item on a PDF printed receipt file.
This can be overridden by the subclasses of OrderItem
"""
course_key = getattr(self, 'course_id', None)
if course_key:
course = get_course_by_id(course_key, depth=0)
return course.display_name
else:
raise Exception(
"Not Implemented. OrderItems that are not Course specific should have"
" a overridden pdf_receipt_display_name property"
)
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
The default implementation returns defaults for most attributes. When no name or
category is specified by the implementation, the string 'N/A' is placed for the
name and category. This should be handled appropriately by all implementations.
Returns
A dictionary containing analytics data for this OrderItem.
"""
return {
'id': self.id,
'sku': type(self).__name__,
'name': 'N/A',
'price': str(self.unit_cost),
'quantity': self.qty,
'category': 'N/A',
}
def retire(self):
"""
Called by the `retire` method defined in the `Order` class. Retires
an order item if its (and its order's) status was erroneously not
updated to "purchased" after the order was processed.
"""
self.status = ORDER_STATUS_MAP[self.status]
self.save()
@python_2_unicode_compatible
class Invoice(TimeStampedModel):
"""
This table capture all the information needed to support "invoicing"
which is when a user wants to purchase Registration Codes,
but will not do so via a Credit Card transaction.
.. pii: Contains many PII fields in an app edx.org does not currently use
.. pii_types: name, location, email_address
.. pii_retirement: retained
"""
class Meta(object):
app_label = "shoppingcart"
company_name = models.CharField(max_length=255, db_index=True)
company_contact_name = models.CharField(max_length=255)
company_contact_email = models.CharField(max_length=255)
recipient_name = models.CharField(max_length=255)
recipient_email = models.CharField(max_length=255)
address_line_1 = models.CharField(max_length=255)
address_line_2 = models.CharField(max_length=255, null=True, blank=True)
address_line_3 = models.CharField(max_length=255, null=True, blank=True)
city = models.CharField(max_length=255, null=True)
state = models.CharField(max_length=255, null=True)
zip = models.CharField(max_length=15, null=True)
country = models.CharField(max_length=64, null=True)
# This field has been deprecated.
# The total amount can now be calculated as the sum
# of each invoice item associated with the invoice.
# For backwards compatibility, this field is maintained
# and written to during invoice creation.
total_amount = models.FloatField()
# This field has been deprecated in order to support
# invoices for items that are not course-related.
# Although this field is still maintained for backwards
# compatibility, you should use CourseRegistrationCodeInvoiceItem
# to look up the course ID for purchased redeem codes.
course_id = CourseKeyField(max_length=255, db_index=True)
internal_reference = models.CharField(
max_length=255,
null=True,
blank=True,
help_text=ugettext_lazy("Internal reference code for this invoice.")
)
customer_reference_number = models.CharField(
max_length=63,
null=True,
blank=True,
help_text=ugettext_lazy("Customer's reference code for this invoice.")
)
is_valid = models.BooleanField(default=True)
@classmethod
def get_invoice_total_amount_for_course(cls, course_key):
"""
        Returns the invoice total amount generated by the course.
"""
result = cls.objects.filter(course_id=course_key, is_valid=True).aggregate(total=Sum('total_amount'))
total = result.get('total', 0)
return total if total else 0
def snapshot(self):
"""Create a snapshot of the invoice.
A snapshot is a JSON-serializable representation
of the invoice's state, including its line items
and associated transactions (payments/refunds).
This is useful for saving the history of changes
to the invoice.
Returns:
dict
"""
return {
'internal_reference': self.internal_reference,
'customer_reference': self.customer_reference_number,
'is_valid': self.is_valid,
'contact_info': {
'company_name': self.company_name,
'company_contact_name': self.company_contact_name,
'company_contact_email': self.company_contact_email,
'recipient_name': self.recipient_name,
'recipient_email': self.recipient_email,
'address_line_1': self.address_line_1,
'address_line_2': self.address_line_2,
'address_line_3': self.address_line_3,
'city': self.city,
'state': self.state,
'zip': self.zip,
'country': self.country,
},
'items': [
item.snapshot()
for item in InvoiceItem.objects.filter(invoice=self).select_subclasses()
],
'transactions': [
trans.snapshot()
for trans in InvoiceTransaction.objects.filter(invoice=self)
],
}
def __str__(self):
label = (
six.text_type(self.internal_reference)
if self.internal_reference
else u"No label"
)
created = (
self.created.strftime("%Y-%m-%d")
if self.created
else u"No date"
)
return u"{label} ({date_created})".format(
label=label, date_created=created
)
INVOICE_TRANSACTION_STATUSES = (
# A payment/refund is in process, but money has not yet been transferred
(u'started', u'started'),
# A payment/refund has completed successfully
# This should be set ONLY once money has been successfully exchanged.
(u'completed', u'completed'),
# A payment/refund was promised, but was cancelled before
# money had been transferred. An example would be
# cancelling a refund check before the recipient has
# a chance to deposit it.
(u'cancelled', u'cancelled')
)
class InvoiceTransaction(TimeStampedModel):
"""Record payment and refund information for invoices.
There are two expected use cases:
1) We send an invoice to someone, and they send us a check.
We then manually create an invoice transaction to represent
the payment.
2) We send an invoice to someone, and they pay us. Later, we
need to issue a refund for the payment. We manually
create a transaction with a negative amount to represent
the refund.
.. no_pii:
"""
class Meta(object):
app_label = "shoppingcart"
invoice = models.ForeignKey(Invoice, on_delete=models.CASCADE)
amount = models.DecimalField(
default=0.0, decimal_places=2, max_digits=30,
help_text=ugettext_lazy(
"The amount of the transaction. Use positive amounts for payments"
" and negative amounts for refunds."
)
)
currency = models.CharField(
default=u"usd",
max_length=8,
help_text=ugettext_lazy("Lower-case ISO currency codes")
)
comments = models.TextField(
null=True,
blank=True,
help_text=ugettext_lazy("Optional: provide additional information for this transaction")
)
status = models.CharField(
max_length=32,
default=u'started',
choices=INVOICE_TRANSACTION_STATUSES,
help_text=ugettext_lazy(
"The status of the payment or refund. "
"'started' means that payment is expected, but money has not yet been transferred. "
"'completed' means that the payment or refund was received. "
"'cancelled' means that payment or refund was expected, but was cancelled before money was transferred. "
)
)
created_by = models.ForeignKey(User, on_delete=models.CASCADE)
last_modified_by = models.ForeignKey(User, related_name='last_modified_by_user', on_delete=models.CASCADE)
@classmethod
def get_invoice_transaction(cls, invoice_id):
"""
        Return the InvoiceTransaction object for the given invoice_id if found,
        otherwise return None.
"""
try:
return cls.objects.get(Q(invoice_id=invoice_id), Q(status='completed') | Q(status='refunded'))
except InvoiceTransaction.DoesNotExist:
return None
@classmethod
def get_total_amount_of_paid_course_invoices(cls, course_key):
"""
        Return the total amount of completed (paid) invoices for the given course.
"""
result = cls.objects.filter(amount__gt=0, invoice__course_id=course_key, status='completed').aggregate(
total=Sum(
'amount',
output_field=models.DecimalField(decimal_places=2, max_digits=30)
)
)
total = result.get('total', 0)
return total if total else 0
def snapshot(self):
"""Create a snapshot of the invoice transaction.
The returned dictionary is JSON-serializable.
Returns:
dict
"""
return {
'amount': six.text_type(self.amount),
'currency': self.currency,
'comments': self.comments,
'status': self.status,
'created_by': self.created_by.username,
'last_modified_by': self.last_modified_by.username
}
class InvoiceItem(TimeStampedModel):
"""
This is the basic interface for invoice items.
Each invoice item represents a "line" in the invoice.
For example, in an invoice for course registration codes,
there might be an invoice item representing 10 registration
codes for the DemoX course.
.. no_pii:
"""
class Meta(object):
app_label = "shoppingcart"
base_manager_name = 'objects'
objects = InheritanceManager()
invoice = models.ForeignKey(Invoice, db_index=True, on_delete=models.CASCADE)
qty = models.IntegerField(
default=1,
help_text=ugettext_lazy("The number of items sold.")
)
unit_price = models.DecimalField(
default=0.0,
decimal_places=2,
max_digits=30,
help_text=ugettext_lazy("The price per item sold, including discounts.")
)
currency = models.CharField(
default=u"usd",
max_length=8,
help_text=ugettext_lazy("Lower-case ISO currency codes")
)
def snapshot(self):
"""Create a snapshot of the invoice item.
The returned dictionary is JSON-serializable.
Returns:
dict
"""
return {
'qty': self.qty,
'unit_price': six.text_type(self.unit_price),
'currency': self.currency
}
class CourseRegistrationCodeInvoiceItem(InvoiceItem):
"""
This is an invoice item that represents a payment for
a course registration.
.. no_pii:
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(max_length=128, db_index=True)
def snapshot(self):
"""Create a snapshot of the invoice item.
This is the same as a snapshot for other invoice items,
with the addition of a `course_id` field.
Returns:
dict
"""
snapshot = super(CourseRegistrationCodeInvoiceItem, self).snapshot()
snapshot['course_id'] = six.text_type(self.course_id)
return snapshot
class InvoiceHistory(models.Model):
"""History of changes to invoices.
This table stores snapshots of invoice state,
including the associated line items and transactions
(payments/refunds).
Entries in the table are created, but never deleted
or modified.
We use Django signals to save history entries on change
events. These signals are fired within a database
transaction, so the history record is created only
if the invoice change is successfully persisted.
.. no_pii:
"""
timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
invoice = models.ForeignKey(Invoice, on_delete=models.CASCADE)
# JSON-serialized representation of the current state
# of the invoice, including its line items and
# transactions (payments/refunds).
snapshot = models.TextField(blank=True)
@classmethod
def save_invoice_snapshot(cls, invoice):
"""Save a snapshot of the invoice's current state.
Arguments:
invoice (Invoice): The invoice to save.
"""
cls.objects.create(
invoice=invoice,
snapshot=json.dumps(invoice.snapshot())
)
@staticmethod
def snapshot_receiver(sender, instance, **kwargs): # pylint: disable=unused-argument
"""Signal receiver that saves a snapshot of an invoice.
Arguments:
sender: Not used, but required by Django signals.
instance (Invoice, InvoiceItem, or InvoiceTransaction)
"""
if isinstance(instance, Invoice):
InvoiceHistory.save_invoice_snapshot(instance)
elif hasattr(instance, 'invoice'):
InvoiceHistory.save_invoice_snapshot(instance.invoice)
class Meta(object):
get_latest_by = "timestamp"
app_label = "shoppingcart"
# Hook up Django signals to record changes in the history table.
# We record any change to an invoice, invoice item, or transaction.
# We also record any deletion of a transaction, since users can delete
# transactions via Django admin.
# Note that we need to include *each* InvoiceItem subclass
# here, since Django signals do not fire automatically for subclasses
# of the "sender" class.
post_save.connect(InvoiceHistory.snapshot_receiver, sender=Invoice)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=CourseRegistrationCodeInvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
post_delete.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
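# Hedged usage sketch (not part of the original module): with the receivers
# above connected, saving any of these models records a history row. In a
# Django shell or test, assuming an existing `invoice` object, one could do:
#
#     invoice.is_valid = False
#     invoice.save()  # post_save fires InvoiceHistory.snapshot_receiver
#     latest = InvoiceHistory.objects.filter(invoice=invoice).latest()
#     json.loads(latest.snapshot)['is_valid']  # -> False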
class CourseRegistrationCode(models.Model):
"""
This table contains registration codes
    With a registration code, a user can register for a course for free.
.. no_pii:
"""
class Meta(object):
app_label = "shoppingcart"
code = models.CharField(max_length=32, db_index=True, unique=True)
course_id = CourseKeyField(max_length=255, db_index=True)
created_by = models.ForeignKey(User, related_name='created_by_user', on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
order = models.ForeignKey(Order, db_index=True, null=True, related_name="purchase_order", on_delete=models.CASCADE)
mode_slug = models.CharField(max_length=100, null=True)
is_valid = models.BooleanField(default=True)
# For backwards compatibility, we maintain the FK to "invoice"
# In the future, we will remove this in favor of the FK
# to "invoice_item" (which can be used to look up the invoice).
invoice = models.ForeignKey(Invoice, null=True, on_delete=models.CASCADE)
invoice_item = models.ForeignKey(CourseRegistrationCodeInvoiceItem, null=True, on_delete=models.CASCADE)
@classmethod
def order_generated_registration_codes(cls, course_id):
"""
Returns the registration codes that were generated
via bulk purchase scenario.
"""
return cls.objects.filter(order__isnull=False, course_id=course_id)
@classmethod
def invoice_generated_registration_codes(cls, course_id):
"""
Returns the registration codes that were generated
via invoice.
"""
return cls.objects.filter(invoice__isnull=False, course_id=course_id)
class RegistrationCodeRedemption(models.Model):
"""
This model contains the registration-code redemption info
.. no_pii:
"""
class Meta(object):
app_label = "shoppingcart"
order = models.ForeignKey(Order, db_index=True, null=True, on_delete=models.CASCADE)
registration_code = models.ForeignKey(CourseRegistrationCode, db_index=True, on_delete=models.CASCADE)
redeemed_by = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE)
redeemed_at = models.DateTimeField(auto_now_add=True, null=True)
course_enrollment = models.ForeignKey(CourseEnrollment, null=True, on_delete=models.CASCADE)
@classmethod
def registration_code_used_for_enrollment(cls, course_enrollment):
"""
        Return the RegistrationCodeRedemption object if a registration code
        was used for the course enrollment, otherwise return None.
"""
# theoretically there could be more than one (e.g. someone self-unenrolls
# then re-enrolls with a different regcode)
reg_codes = cls.objects.filter(course_enrollment=course_enrollment).order_by('-redeemed_at')
if reg_codes:
# return the first one. In all normal use cases of registration codes
# the user will only have one
return reg_codes[0]
return None
@classmethod
def is_registration_code_redeemed(cls, course_reg_code):
"""
        Check whether the given registration code has already been redeemed
        (i.e. exists in RegistrationCodeRedemption).
"""
return cls.objects.filter(registration_code__code=course_reg_code).exists()
@classmethod
def get_registration_code_redemption(cls, code, course_id):
"""
        Return the registration code redemption object if found, otherwise return None.
"""
try:
code_redemption = cls.objects.get(registration_code__code=code, registration_code__course_id=course_id)
except cls.DoesNotExist:
code_redemption = None
return code_redemption
@classmethod
def create_invoice_generated_registration_redemption(cls, course_reg_code, user): # pylint: disable=invalid-name
"""
This function creates a RegistrationCodeRedemption entry in case the registration codes were invoice generated
and thus the order_id is missing.
"""
code_redemption = RegistrationCodeRedemption(registration_code=course_reg_code, redeemed_by=user)
code_redemption.save()
return code_redemption
@python_2_unicode_compatible
class Coupon(models.Model):
"""
This table contains coupon codes
    A user can get a discount on a course by providing a coupon code.
.. no_pii:
"""
class Meta(object):
app_label = "shoppingcart"
code = models.CharField(max_length=32, db_index=True)
description = models.CharField(max_length=255, null=True, blank=True)
course_id = CourseKeyField(max_length=255)
percentage_discount = models.IntegerField(default=0)
created_by = models.ForeignKey(User, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
is_active = models.BooleanField(default=True)
expiration_date = models.DateTimeField(null=True, blank=True)
def __str__(self):
return "[Coupon] code: {} course: {}".format(self.code, self.course_id)
@property
def display_expiry_date(self):
"""
        Return the coupon expiration date in a human-readable format.
"""
return (self.expiration_date - timedelta(days=1)).strftime(u"%B %d, %Y") if self.expiration_date else None
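    # Hedged worked example (illustration only): for a hypothetical
    # expiration_date of 2021-06-15, display_expiry_date subtracts one day and
    # renders "June 14, 2021", presumably the last day the coupon can be used.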
class CouponRedemption(models.Model):
"""
    This table contains coupon redemption info.
.. no_pii:
"""
class Meta(object):
app_label = "shoppingcart"
order = models.ForeignKey(Order, db_index=True, on_delete=models.CASCADE)
user = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE)
coupon = models.ForeignKey(Coupon, db_index=True, on_delete=models.CASCADE)
@classmethod
def remove_code_redemption_from_item(cls, item, user):
"""
        If an item is removed from the shopping cart, remove
        the corresponding coupon code redemption info.
"""
order_item_course_id = item.course_id
try:
# Try to remove redemption information of coupon code, If exist.
coupon_redemption = cls.objects.get(
user=user,
coupon__course_id=order_item_course_id if order_item_course_id else CourseKeyField.Empty,
order=item.order_id
)
coupon_redemption.delete()
log.info(
u'Coupon "%s" redemption entry removed for user "%s" for order item "%s"',
coupon_redemption.coupon.code,
user,
str(item.id),
)
except CouponRedemption.DoesNotExist:
log.debug(u'Code redemption does not exist for order item id=%s.', str(item.id))
@classmethod
def remove_coupon_redemption_from_cart(cls, user, cart):
"""
        Delete all coupon redemption entries for the given user and cart.
"""
coupon_redemption = cls.objects.filter(user=user, order=cart)
if coupon_redemption:
coupon_redemption.delete()
log.info(u'Coupon redemption entry removed for user %s for order %s', user, cart.id)
@classmethod
def get_discount_price(cls, percentage_discount, value):
"""
        Return the discounted price after applying the coupon's percentage discount.
"""
discount = Decimal("{0:.2f}".format(Decimal(percentage_discount / 100.00) * value))
return value - discount
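    # Hedged worked example (illustration only): a 25% coupon on a
    # Decimal('40.00') item gives a discount of Decimal('10.00'), so
    # CouponRedemption.get_discount_price(25, Decimal('40.00')) returns Decimal('30.00').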
@classmethod
def add_coupon_redemption(cls, coupon, order, cart_items):
"""
        Add coupon redemption info to the CouponRedemption model.
"""
is_redemption_applied = False
coupon_redemptions = cls.objects.filter(order=order, user=order.user)
for coupon_redemption in coupon_redemptions:
if coupon_redemption.coupon.code != coupon.code or coupon_redemption.coupon.id == coupon.id:
log.exception(
u"Coupon redemption already exist for user '%s' against order id '%s'",
order.user.username,
order.id,
)
raise MultipleCouponsNotAllowedException
for item in cart_items:
if item.course_id:
if item.course_id == coupon.course_id:
coupon_redemption = cls(order=order, user=order.user, coupon=coupon)
coupon_redemption.save()
discount_price = cls.get_discount_price(coupon.percentage_discount, item.unit_cost)
item.list_price = item.unit_cost
item.unit_cost = discount_price
item.save()
log.info(
u"Discount generated for user %s against order id '%s'",
order.user.username,
order.id,
)
is_redemption_applied = True
return is_redemption_applied
return is_redemption_applied
@classmethod
def get_top_discount_codes_used(cls, course_id):
"""
Returns the top discount codes used.
QuerySet = [
{
'coupon__percentage_discount': 22,
'coupon__code': '12',
'coupon__used_count': '2',
},
{
...
}
]
"""
return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).values(
'coupon__code', 'coupon__percentage_discount'
).annotate(coupon__used_count=Count('coupon__code')).order_by('-coupon__used_count')
@classmethod
def get_total_coupon_code_purchases(cls, course_id):
"""
        Return the total number of seats purchased using coupon codes.
"""
return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).aggregate(Count('coupon'))
class PaidCourseRegistration(OrderItem):
"""
This is an inventory item for paying for a course registration
.. no_pii:
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(max_length=128, db_index=True)
mode = models.SlugField(default=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)
course_enrollment = models.ForeignKey(CourseEnrollment, null=True, on_delete=models.CASCADE)
@classmethod
def get_self_purchased_seat_count(cls, course_key, status='purchased'):
"""
        Return the count of paid course items filtered by course_id and status.
"""
return cls.objects.filter(course_id=course_key, status=status).count()
@classmethod
def get_course_item_for_user_enrollment(cls, user, course_id, course_enrollment):
"""
        Return the PaidCourseRegistration object if the user has paid for
        the course enrollment, otherwise return None.
"""
try:
return cls.objects.filter(course_id=course_id, user=user, course_enrollment=course_enrollment,
status='purchased').latest('id')
except PaidCourseRegistration.DoesNotExist:
return None
@classmethod
def contained_in_order(cls, order, course_id):
"""
Is the course defined by course_id contained in the order?
"""
return course_id in [
item.course_id
for item in order.orderitem_set.all().select_subclasses("paidcourseregistration")
if isinstance(item, cls)
]
@classmethod
def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
"""
This will return the total amount of money that a purchased course generated
"""
total_cost = 0
result = cls.objects.filter(course_id=course_key, status=status).aggregate(
total=Sum(
F('qty') * F('unit_cost'),
output_field=models.DecimalField(decimal_places=2, max_digits=30)
)
)
if result['total'] is not None:
total_cost = result['total']
return total_cost
@classmethod
@transaction.atomic
def add_to_order(cls, order, course_id, mode_slug=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG,
cost=None, currency=None):
"""
A standardized way to create these objects, with sensible defaults filled in.
Will update the cost if called on an order that already carries the course.
Returns the order item
"""
# First a bunch of sanity checks:
# actually fetch the course to make sure it exists, use this to
# throw errors if it doesn't.
course = modulestore().get_course(course_id)
if not course:
log.error(u"User {} tried to add non-existent course {} to cart id {}"
.format(order.user.email, course_id, order.id))
raise CourseDoesNotExistException
if cls.contained_in_order(order, course_id):
log.warning(
u"User %s tried to add PaidCourseRegistration for course %s, already in cart id %s",
order.user.email,
course_id,
order.id,
)
raise ItemAlreadyInCartException
if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
log.warning(u"User {} trying to add course {} to cart id {}, already registered"
.format(order.user.email, course_id, order.id))
raise AlreadyEnrolledInCourseException
### Validations done, now proceed
### handle default arguments for mode_slug, cost, currency
course_mode = CourseMode.mode_for_course(course_id, mode_slug)
if not course_mode:
# user could have specified a mode that's not set, in that case return the DEFAULT_MODE
course_mode = CourseMode.DEFAULT_SHOPPINGCART_MODE
if not cost:
cost = course_mode.min_price
if not currency:
currency = course_mode.currency
super(PaidCourseRegistration, cls).add_to_order(order, course_id, cost, currency=currency)
item, __ = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id)
item.status = order.status
item.mode = course_mode.slug
item.qty = 1
item.unit_cost = cost
item.list_price = cost
item.line_desc = _(u'Registration for Course: {course_name}').format(
course_name=course.display_name_with_default)
item.currency = currency
order.currency = currency
item.report_comments = item.csv_report_comments
order.save()
item.save()
log.info(u"User {} added course registration {} to cart: order {}"
.format(order.user.email, course_id, order.id))
CourseEnrollment.send_signal_full(EnrollStatusChange.paid_start,
user=order.user, mode=item.mode, course_id=course_id,
cost=cost, currency=currency)
return item
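    # Hedged usage sketch (not part of the original module; `user` and
    # `course_key` are assumed to come from the caller):
    #
    #     cart = Order.get_cart_for_user(user)
    #     item = PaidCourseRegistration.add_to_order(cart, course_key)
    #     # cost defaults to the course mode's min_price, currency to its currency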
def purchased_callback(self):
"""
When purchased, this should enroll the user in the course. We are assuming that
course settings for enrollment date are configured such that only if the (user.email, course_id) pair is found
in CourseEnrollmentAllowed will the user be allowed to enroll. Otherwise requiring payment
would in fact be quite silly since there's a clear back door.
"""
if not modulestore().has_course(self.course_id):
msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
log.error(msg)
raise PurchasedCallbackException(msg)
# enroll in course and link to the enrollment_id
self.course_enrollment = CourseEnrollment.enroll(user=self.user, course_key=self.course_id, mode=self.mode)
self.save()
log.info(u"Enrolled {0} in paid course {1}, paid ${2}"
.format(self.user.email, self.course_id, self.line_cost))
self.course_enrollment.send_signal(EnrollStatusChange.paid_complete,
cost=self.line_cost, currency=self.currency)
def generate_receipt_instructions(self):
"""
Generates instructions when the user has purchased a PaidCourseRegistration.
Basically tells the user to visit the dashboard to see their new classes
"""
notification = Text(_(
u"Please visit your {link_start}dashboard{link_end} "
"to see your new course."
)).format(
link_start=HTML(u'<a href="{url}">').format(url=reverse('dashboard')),
link_end=HTML(u'</a>'),
)
return self.pk_with_subclass, set([notification])
@property
def csv_report_comments(self):
"""
Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
Otherwise returns the annotation
"""
try:
return PaidCourseRegistrationAnnotation.objects.get(course_id=self.course_id).annotation
except PaidCourseRegistrationAnnotation.DoesNotExist:
return u""
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the Order Item is associated with a course, additional fields will be populated with
course information. If there is a mode associated, the mode data is included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(PaidCourseRegistration, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = six.text_type(self.course_id)
data['category'] = six.text_type(self.course_id.org)
if self.mode:
data['sku'] = sku + u'.' + six.text_type(self.mode)
return data
class CourseRegCodeItem(OrderItem):
"""
This is an inventory item for paying for
generating course registration codes
.. no_pii:
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(max_length=128, db_index=True)
mode = models.SlugField(default=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)
@classmethod
def get_bulk_purchased_seat_count(cls, course_key, status='purchased'):
"""
        Return the total number of bulk-purchased seats.
"""
total = 0
result = cls.objects.filter(course_id=course_key, status=status).aggregate(total=Sum('qty'))
if result['total'] is not None:
total = result['total']
return total
@classmethod
def contained_in_order(cls, order, course_id):
"""
Is the course defined by course_id contained in the order?
"""
return course_id in [
item.course_id
for item in order.orderitem_set.all().select_subclasses("courseregcodeitem")
if isinstance(item, cls)
]
@classmethod
def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
"""
This will return the total amount of money that a purchased course generated
"""
total_cost = 0
result = cls.objects.filter(course_id=course_key, status=status).aggregate(
total=Sum(
F('qty') * F('unit_cost'),
output_field=models.DecimalField(decimal_places=2, max_digits=30)
)
)
if result['total'] is not None:
total_cost = result['total']
return total_cost
@classmethod
@transaction.atomic
def add_to_order(cls, order, course_id, qty, mode_slug=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG,
cost=None, currency=None):
"""
A standardized way to create these objects, with sensible defaults filled in.
Will update the cost if called on an order that already carries the course.
Returns the order item
"""
# First a bunch of sanity checks:
# actually fetch the course to make sure it exists, use this to
# throw errors if it doesn't.
course = modulestore().get_course(course_id)
if not course:
log.error(u"User {} tried to add non-existent course {} to cart id {}"
.format(order.user.email, course_id, order.id))
raise CourseDoesNotExistException
if cls.contained_in_order(order, course_id):
log.warning(u"User {} tried to add PaidCourseRegistration for course {}, already in cart id {}"
.format(order.user.email, course_id, order.id))
raise ItemAlreadyInCartException
if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
log.warning(u"User {} trying to add course {} to cart id {}, already registered"
.format(order.user.email, course_id, order.id))
raise AlreadyEnrolledInCourseException
### Validations done, now proceed
### handle default arguments for mode_slug, cost, currency
course_mode = CourseMode.mode_for_course(course_id, mode_slug)
if not course_mode:
# user could have specified a mode that's not set, in that case return the DEFAULT_SHOPPINGCART_MODE
course_mode = CourseMode.DEFAULT_SHOPPINGCART_MODE
if not cost:
cost = course_mode.min_price
if not currency:
currency = course_mode.currency
super(CourseRegCodeItem, cls).add_to_order(order, course_id, cost, currency=currency)
item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id) # pylint: disable=unused-variable
item.status = order.status
item.mode = course_mode.slug
item.unit_cost = cost
item.list_price = cost
item.qty = qty
item.line_desc = _(u'Enrollment codes for Course: {course_name}').format(
course_name=course.display_name_with_default)
item.currency = currency
order.currency = currency
item.report_comments = item.csv_report_comments
order.save()
item.save()
log.info(u"User {} added course registration {} to cart: order {}"
.format(order.user.email, course_id, order.id))
return item
def purchased_callback(self):
"""
        When the purchase is completed, this OrderItem type generates registration codes that can
        be redeemed by users.
"""
if not modulestore().has_course(self.course_id):
msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
log.error(msg)
raise PurchasedCallbackException(msg)
total_registration_codes = int(self.qty)
# we need to import here because of a circular dependency
# we should ultimately refactor code to have save_registration_code in this models.py
# file, but there's also a shared dependency on a random string generator which
# is in another PR (for another feature)
from lms.djangoapps.instructor.views.api import save_registration_code
for i in range(total_registration_codes):
save_registration_code(self.user, self.course_id, self.mode, order=self.order)
log.info(u"Enrolled {0} in paid course {1}, paid ${2}"
.format(self.user.email, self.course_id, self.line_cost))
@property
def csv_report_comments(self):
"""
Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
Otherwise returns the annotation
"""
try:
return CourseRegCodeItemAnnotation.objects.get(course_id=self.course_id).annotation
except CourseRegCodeItemAnnotation.DoesNotExist:
return u""
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the OrderItem is associated with a course, additional fields will be populated with
course information. If a mode is available, it will be included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(CourseRegCodeItem, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = six.text_type(self.course_id)
data['category'] = six.text_type(self.course_id.org)
if self.mode:
data['sku'] = sku + u'.' + six.text_type(self.mode)
return data
@python_2_unicode_compatible
class CourseRegCodeItemAnnotation(models.Model):
"""
A model that maps course_id to an additional annotation. This is specifically needed because when Stanford
    generates reports for the paid courses, each report item must contain the payment account associated with a course.
And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association,
so this is to retrofit it.
.. no_pii:
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
annotation = models.TextField(null=True)
def __str__(self):
return u"{} : {}".format(text_type(self.course_id), self.annotation)
@python_2_unicode_compatible
class PaidCourseRegistrationAnnotation(models.Model):
"""
A model that maps course_id to an additional annotation. This is specifically needed because when Stanford
    generates reports for the paid courses, each report item must contain the payment account associated with a course.
And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association,
so this is to retrofit it.
.. no_pii:
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
annotation = models.TextField(null=True)
def __str__(self):
return u"{} : {}".format(text_type(self.course_id), self.annotation)
class CertificateItem(OrderItem):
"""
This is an inventory item for purchasing certificates
.. no_pii:
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(max_length=128, db_index=True)
course_enrollment = models.ForeignKey(CourseEnrollment, on_delete=models.CASCADE)
mode = models.SlugField()
@receiver(UNENROLL_DONE)
def refund_cert_callback(sender, course_enrollment=None, skip_refund=False, **kwargs): # pylint: disable=no-self-argument,unused-argument
"""
When a CourseEnrollment object calls its unenroll method, this function checks to see if that unenrollment
occurred in a verified certificate that was within the refund deadline. If so, it actually performs the
refund.
Returns the refunded certificate on a successful refund; else, it returns nothing.
"""
# Only refund verified cert unenrollments that are within bounds of the expiration date
if skip_refund or (not course_enrollment.refundable()):
return
target_certs = CertificateItem.objects.filter(course_id=course_enrollment.course_id, user_id=course_enrollment.user, status='purchased', mode='verified')
try:
target_cert = target_certs[0]
except IndexError:
log.warning(
u"Matching CertificateItem not found while trying to refund. User %s, Course %s",
course_enrollment.user,
course_enrollment.course_id,
)
return
target_cert.status = 'refunded'
target_cert.refund_requested_time = datetime.now(pytz.utc)
target_cert.save()
target_cert.order.refund()
order_number = target_cert.order_id
# send billing an email so they can handle refunding
subject = _("[Refund] User-Requested Refund")
message = u"User {user} ({user_email}) has requested a refund on Order #{order_number}.".format(user=course_enrollment.user,
user_email=course_enrollment.user.email,
order_number=order_number)
to_email = [settings.PAYMENT_SUPPORT_EMAIL]
from_email = configuration_helpers.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
try:
send_mail(subject, message, from_email, to_email, fail_silently=False)
except Exception as exception: # pylint: disable=broad-except
err_str = ('Failed sending email to billing to request a refund for verified certificate'
u' (User {user}, Course {course}, CourseEnrollmentID {ce_id}, Order #{order})\n{exception}')
log.error(err_str.format(
user=course_enrollment.user,
course=course_enrollment.course_id,
ce_id=course_enrollment.id,
order=order_number,
exception=exception,
))
return target_cert
@classmethod
@transaction.atomic
def add_to_order(cls, order, course_id, cost, mode, currency='usd'):
"""
Add a CertificateItem to an order
Returns the CertificateItem object after saving
`order` - an order that this item should be added to, generally the cart order
`course_id` - the course that we would like to purchase as a CertificateItem
`cost` - the amount the user will be paying for this CertificateItem
`mode` - the course mode that this certificate is going to be issued for
This item also creates a new enrollment if none exists for this user and this course.
Example Usage:
cart = Order.get_cart_for_user(user)
CertificateItem.add_to_order(cart, 'edX/Test101/2013_Fall', 30, 'verified')
"""
super(CertificateItem, cls).add_to_order(order, course_id, cost, currency=currency)
course_enrollment = CourseEnrollment.get_or_create_enrollment(order.user, course_id)
# do some validation on the enrollment mode
valid_modes = CourseMode.modes_for_course_dict(course_id)
if mode in valid_modes:
mode_info = valid_modes[mode]
else:
msg = u"Mode {mode} does not exist for {course_id}".format(mode=mode, course_id=course_id)
log.error(msg)
raise InvalidCartItem(
_(u"Mode {mode} does not exist for {course_id}").format(mode=mode, course_id=course_id)
)
item, _created = cls.objects.get_or_create(
order=order,
user=order.user,
course_id=course_id,
course_enrollment=course_enrollment,
mode=mode,
)
item.status = order.status
item.qty = 1
item.unit_cost = cost
item.list_price = cost
course_name = modulestore().get_course(course_id).display_name
# Translators: In this particular case, mode_name refers to a
# particular mode (i.e. Honor Code Certificate, Verified Certificate, etc)
# by which a user could enroll in the given course.
item.line_desc = _(u"{mode_name} for course {course}").format(
mode_name=mode_info.name,
course=course_name
)
item.currency = currency
order.currency = currency
order.save()
item.save()
# signal course added to cart
course_enrollment.send_signal(EnrollStatusChange.paid_start, cost=cost, currency=currency)
return item
def purchased_callback(self):
"""
When purchase goes through, activate and update the course enrollment for the correct mode
"""
self.course_enrollment.change_mode(self.mode)
self.course_enrollment.activate()
self.course_enrollment.send_signal(EnrollStatusChange.upgrade_complete,
cost=self.unit_cost, currency=self.currency)
def additional_instruction_text(self):
verification_reminder = ""
domain = configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME)
dashboard_path = reverse('dashboard')
scheme = u"https" if settings.HTTPS == "on" else u"http"
dashboard_url = "{scheme}://{domain}{path}".format(scheme=scheme, domain=domain, path=dashboard_path)
refund_reminder_msg = _("To receive a refund you may unenroll from the course on your edX Dashboard "
"({dashboard_url}) up to 14 days after your payment or 14 days after your"
" course starts (up to six months after your payment).\n"
).format(dashboard_url=dashboard_url)
is_enrollment_mode_verified = self.course_enrollment.is_verified_enrollment()
is_professional_mode_verified = self.course_enrollment.is_professional_enrollment()
if is_enrollment_mode_verified:
path = reverse('verify_student_verify_now', kwargs={'course_id': six.text_type(self.course_id)})
verification_url = "http://{domain}{path}".format(domain=domain, path=path)
verification_reminder = _(
u"If you haven't verified your identity yet, please start the verification process"
u" ({verification_url}).").format(verification_url=verification_url)
if is_professional_mode_verified:
refund_reminder_msg = _("You can unenroll in the course and receive a full refund for 2 days after the "
"course start date.\n")
refund_reminder = _(
"{refund_reminder_msg}"
"For help unenrolling, Please see How do I unenroll from a course? "
"({how_to_unenroll_link}) in our edX HelpCenter.").format(
refund_reminder_msg=refund_reminder_msg,
how_to_unenroll_link=settings.SUPPORT_HOW_TO_UNENROLL_LINK
)
# Need this to be unicode in case the reminder strings
# have been translated and contain non-ASCII unicode
return u"{verification_reminder} {refund_reminder}".format(
verification_reminder=verification_reminder,
refund_reminder=refund_reminder
)
@classmethod
def verified_certificates_count(cls, course_id, status):
"""Return a queryset of CertificateItem for every verified enrollment in course_id with the given status."""
return use_read_replica_if_available(
CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status).count())
# TODO combine these three methods into one
@classmethod
def verified_certificates_monetary_field_sum(cls, course_id, status, field_to_aggregate):
"""
Returns a Decimal indicating the total sum of field_to_aggregate for all verified certificates with a particular status.
Sample usages:
- status 'refunded' and field_to_aggregate 'unit_cost' will give the total amount of money refunded for course_id
- status 'purchased' and field_to_aggregate 'service_fees' gives the sum of all service fees for purchased certificates
etc
"""
query = use_read_replica_if_available(
CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status)).aggregate(Sum(field_to_aggregate))[field_to_aggregate + '__sum']
if query is None:
return Decimal(0.00)
else:
return query
@classmethod
def verified_certificates_contributing_more_than_minimum(cls, course_id):
return use_read_replica_if_available(
CertificateItem.objects.filter(
course_id=course_id,
mode='verified',
status='purchased',
unit_cost__gt=(CourseMode.min_course_price_for_verified_for_currency(course_id, 'usd')))).count()
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the CertificateItem is associated with a course, additional fields will be populated with
course information. If there is a mode associated with the certificate, it is included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(CertificateItem, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = six.text_type(self.course_id)
data['category'] = six.text_type(self.course_id.org)
if self.mode:
data['sku'] = sku + u'.' + six.text_type(self.mode)
return data
class DonationConfiguration(ConfigurationModel):
"""
Configure whether donations are enabled on the site.
.. no_pii:
"""
class Meta(ConfigurationModel.Meta):
app_label = "shoppingcart"
class Donation(OrderItem):
"""
A donation made by a user.
Donations can be made for a specific course or to the organization as a whole.
Users can choose the donation amount.
.. no_pii:
"""
class Meta(object):
app_label = "shoppingcart"
# Types of donations
DONATION_TYPES = (
(u"general", u"A general donation"),
(u"course", u"A donation to a particular course")
)
# The type of donation
donation_type = models.CharField(max_length=32, default=u"general", choices=DONATION_TYPES)
# If a donation is made for a specific course, then store the course ID here.
# If the donation is made to the organization as a whole,
# set this field to CourseKeyField.Empty
course_id = CourseKeyField(max_length=255, db_index=True)
@classmethod
@transaction.atomic
def add_to_order(cls, order, donation_amount, course_id=None, currency='usd'):
"""Add a donation to an order.
Args:
order (Order): The order to add this donation to.
donation_amount (Decimal): The amount the user is donating.
Keyword Args:
course_id (CourseKey): If provided, associate this donation with a particular course.
            currency (str): The currency used for the donation.
Raises:
InvalidCartItem: The provided course ID is not valid.
Returns:
Donation
"""
# This will validate the currency but won't actually add the item to the order.
super(Donation, cls).add_to_order(order, currency=currency)
# Create a line item description, including the name of the course
# if this is a per-course donation.
# This will raise an exception if the course can't be found.
description = cls._line_item_description(course_id=course_id)
params = {
"order": order,
"user": order.user,
"status": order.status,
"qty": 1,
"unit_cost": donation_amount,
"currency": currency,
"line_desc": description
}
if course_id is not None:
params["course_id"] = course_id
params["donation_type"] = "course"
else:
params["donation_type"] = "general"
return cls.objects.create(**params)
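    # Hedged usage sketch (not part of the original module; `user` and
    # `course_key` are assumed to come from the caller):
    #
    #     cart = Order.get_cart_for_user(user)
    #     Donation.add_to_order(cart, Decimal('25.00'))                        # general donation
    #     Donation.add_to_order(cart, Decimal('25.00'), course_id=course_key)  # per-course donation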
def purchased_callback(self):
"""Donations do not need to be fulfilled, so this method does nothing."""
pass
def generate_receipt_instructions(self):
"""Provide information about tax-deductible donations in the receipt.
Returns:
tuple of (Donation, unicode)
"""
return self.pk_with_subclass, set([self._tax_deduction_msg()])
def additional_instruction_text(self, **kwargs):
"""Provide information about tax-deductible donations in the confirmation email.
Returns:
unicode
"""
return self._tax_deduction_msg()
def _tax_deduction_msg(self):
"""Return the translated version of the tax deduction message.
Returns:
unicode
"""
return _(
u"We greatly appreciate this generous contribution and your support of the {platform_name} mission. "
u"This receipt was prepared to support charitable contributions for tax purposes. "
u"We confirm that neither goods nor services were provided in exchange for this gift."
).format(platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME))
@classmethod
def _line_item_description(cls, course_id=None):
"""Create a line-item description for the donation.
Includes the course display name if provided.
Keyword Arguments:
course_id (CourseKey)
Raises:
CourseDoesNotExistException: The course ID is not valid.
Returns:
unicode
"""
# If a course ID is provided, include the display name of the course
# in the line item description.
if course_id is not None:
course = modulestore().get_course(course_id)
if course is None:
msg = u"Could not find a course with the ID '{course_id}'".format(course_id=course_id)
log.error(msg)
raise CourseDoesNotExistException(
_(u"Could not find a course with the ID '{course_id}'").format(course_id=course_id)
)
return _(u"Donation for {course}").format(course=course.display_name)
# The donation is for the organization as a whole, not a specific course
else:
return _(u"Donation for {platform_name}").format(
platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME),
)
@property
def single_item_receipt_context(self):
return {
'receipt_has_donation_item': True,
}
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the donation is associated with a course, additional fields will be populated with
course information. When no name or category is specified by the implementation, the
platform name is used as a default value for required event fields, to declare that
the Order is specific to the platform, rather than a specific product name or category.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(Donation, self).analytics_data()
if self.course_id != CourseKeyField.Empty:
data['name'] = six.text_type(self.course_id)
data['category'] = six.text_type(self.course_id.org)
else:
data['name'] = configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
data['category'] = configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
return data
@property
def pdf_receipt_display_name(self):
"""
How to display this item on a PDF printed receipt file.
"""
return self._line_item_description(course_id=self.course_id)
|
edx-solutions/edx-platform
|
lms/djangoapps/shoppingcart/models.py
|
Python
|
agpl-3.0
| 90,364
|
[
"VisIt"
] |
dccd900365183123be613e05de4f3bd7a81c7222a5bcf8c71666bc68f86e0456
|
# -*- coding: utf-8 -*-
from ase.neb import get_NEB_plot
def NudgedElasticBand(images):
fig = get_NEB_plot(images)
fig.show()
|
askhl/ase
|
ase/gui/neb.py
|
Python
|
gpl-2.0
| 137
|
[
"ASE"
] |
8ba6f5938bc4e7551ec73e9bf3cc96b1c216ea809776ec7aed8c6b5e16d0706e
|
# -*- coding: utf-8 -*-
# (c) The James Hutton Institute 2019
# (c) University of Strathclyde 2019
# Author: Leighton Pritchard
#
# Contact:
# leighton.pritchard@strath.ac.uk
#
# Leighton Pritchard,
# Strathclyde Institute for Pharmacy and Biomedical Sciences,
# Cathedral Street,
# Glasgow,
# G1 1XQ
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2016-2019 The James Hutton Institute
# Copyright (c) 2019 University of Strathclyde
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Code for handling BLAST output files."""
from typing import Any, List, TextIO
def parse_blasttab(fhandle: TextIO) -> List[List[str]]:
"""Return the passed BLAST tab output file as a list of lists.
:param fhandle: TextIO, filehandle containing BLAST output file
This is used when testing for conserved BLAST output, as the
exact format of the BLAST result can depend on the software version.
For instance, the locally-installed version may be BLASTN+ 2.6.0,
which reports match identity to 3sf, and the version in CI may be
BLASTN+ 2.2.28, which reports to 2sf.
Returning a list of lines, parsed into the appropriate data type,
allows for direct comparison of line content independent of formatting.
"""
retval = []
for line in fhandle.readlines():
splitline = line.split("\t") # type: List[Any]
data = splitline[:2] # First two columns are strings
data += [float(_) for _ in splitline[2:]] # The rest are numeric
retval.append(data)
return retval
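# Hedged usage sketch (not part of the original module): the helper below is
# illustrative only, and the default path "blast_output.tab" is hypothetical.
def _example_parse_blasttab(path: str = "blast_output.tab") -> List[List[str]]:
    """Open a BLAST tab file and return its rows parsed by parse_blasttab().
    The first two columns of each row remain strings; the rest are floats.
    """
    with open(path) as fhandle:
        return parse_blasttab(fhandle)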
|
widdowquinn/pyani
|
pyani/blast.py
|
Python
|
mit
| 2,541
|
[
"BLAST"
] |
992cc85cee63ff2cc18c5edf38472b52a3fe46f997cd2b1738c530c149bc3d86
|
################################################################################
#
# Copyright 2015-2021 Félix Brezo and Yaiza Rubio
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
import argparse
import datetime as dt
import json
import os
import signal
import socket
# global issues for multiprocessing
from multiprocessing import Process, Queue, Pool
import whois
import osrframework
import osrframework.domains.gtld as gtld
import osrframework.domains.cctld as cctld
import osrframework.domains.generic_tld as generic_tld
import osrframework.domains.geographic_tld as geographic_tld
import osrframework.domains.brand_tld as brand_tld
import osrframework.domains.other_subdomains as other_subdomains
import osrframework.utils.banner as banner
import osrframework.utils.configuration as configuration
import osrframework.utils.general as general
# Defining the TLD dictionary based on <https://en.wikipedia.org/wiki/List_of_Internet_top-level_domains>
TLD = {}
# Global TLD
TLD["global"] = gtld.tld
# Country Code TLD
TLD["cc"] = cctld.tld
# Generic TLD
TLD["generic"] = generic_tld.tld
# Geographic TLD
TLD["geographic"] = geographic_tld.tld
# Brand TLD
TLD["brand"] = brand_tld.tld
# Other TLD
#TLD["other"] = other_subdomains.tld
def get_whois_info(domain):
"""Method that trie to recover the whois info from a domain
Args:
domain: The domain to verify.
Returns:
        list: A list of i3visio entities, each represented as a dictionary with
        `value`, `type` and `attributes` keys.
Raises:
Exception in case the whois fails.
"""
print(f"[i] Running whois for '{general.info(domain)}'...")
new = []
# Grabbing the aliases
try:
tmp = {}
tmp["type"] = "com.i3visio.Alias"
tmp["value"] = str(domain.split(".")[0])
tmp["attributes"] = []
new.append(tmp)
except Exception:
pass
info = whois.whois(domain)
if info.status is None:
raise Exception("UnknownDomainError: " + domain + " could not be resolved.")
# Grabbing the emails
try:
emails = {}
emails["type"] = "com.i3visio.Email"
if not isinstance(info.emails, list):
aux = [info.emails]
emails["value"] = json.dumps(aux)
else:
emails["value"] = json.dumps(info.emails)
emails["attributes"] = []
new.append(emails)
except Exception:
pass
# Grabbing the country
try:
if info.country:
tmp = {}
tmp["type"] = "com.i3visio.Location.Country"
tmp["value"] = info.country
tmp["attributes"] = []
new.append(tmp)
except Exception:
pass
# Grabbing the state
try:
if info.state:
tmp = {}
tmp["type"] = "com.i3visio.Location.State"
tmp["value"] = info.state
tmp["attributes"] = []
new.append(tmp)
except Exception:
pass
# Grabbing the address
try:
if info.address:
tmp = {}
tmp["type"] = "com.i3visio.Location.Address"
tmp["value"] = info.address
tmp["attributes"] = []
new.append(tmp)
except Exception:
pass
# Grabbing the zipcode
try:
if info.zipcode:
tmp = {}
tmp["type"] = "com.i3visio.Location.Zipcode"
tmp["value"] = info.zipcode
tmp["attributes"] = []
new.append(tmp)
except Exception:
pass
# Grabbing the creation date
try:
if info.creation_date:
tmp = {}
tmp["type"] = "com.i3visio.Date.Creation"
tmp["value"] = info.creation_date[0].isoformat(' ', 'seconds')
tmp["attributes"] = []
new.append(tmp)
except Exception as e:
pass
# Grabbing the updated date
try:
if info.update_date:
tmp = {}
tmp["type"] = "com.i3visio.Date.Update"
tmp["value"] = info.update_date[0].isoformat(' ', 'seconds')
tmp["attributes"] = []
new.append(tmp)
except Exception:
pass
# Grabbing the expiration date
try:
if info.expiration_date:
tmp = {}
tmp["type"] = "com.i3visio.Date.Expiration"
tmp["value"] = info.expiration_date[0].isoformat(' ', 'seconds')
tmp["attributes"] = []
new.append(tmp)
except Exception:
pass
    # Grabbing the registrar
try:
tmp = {}
tmp["type"] = "com.i3visio.Registrar"
tmp["value"] = str(info.registrar)
tmp["attributes"] = []
new.append(tmp)
except Exception:
pass
    # Grabbing the registrant's full name
try:
tmp = {}
tmp["type"] = "com.i3visio.Fullname"
try:
tmp["value"] = str(info.name)
except Exception:
tmp["value"] = info.name
tmp["attributes"] = []
new.append(tmp)
except Exception:
pass
return new
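# Hedged sketch of the returned entity list (not part of the original module;
# "example.com" and the values shown are hypothetical):
#
#     get_whois_info("example.com")
#     # -> [{"type": "com.i3visio.Alias", "value": "example", "attributes": []},
#     #     {"type": "com.i3visio.Email", "value": '["abuse@example.com"]', "attributes": []},
#     #     {"type": "com.i3visio.Registrar", "value": "Example Registrar, Inc.", "attributes": []},
#     #     ...]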
def create_domains(tlds, nicks=None, nicks_file=None):
"""Method that globally permits to generate the domains to be checked
Args:
tlds (list): List of tlds.
nicks (list): List of aliases.
nicks_file (str): The filepath to the aliases file.
Returns:
list: The list of domains to be checked.
"""
domain_candidates = []
if nicks is not None:
for nick in nicks:
for tld in tlds:
tmp = {
"domain" : nick + tld["tld"],
"type" : tld["type"],
"tld": tld["tld"]
}
domain_candidates.append(tmp)
elif nicks_file is not None:
with open(nicks_file, "r") as file:
nicks = file.read().splitlines()
for nick in nicks:
for tld in tlds:
tmp = {
"domain" : nick + tld["tld"],
"type" : tld["type"],
"tld": tld["tld"]
}
domain_candidates.append(tmp)
return domain_candidates
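# Hedged worked example (illustration only; the nick "example" is hypothetical):
#
#     create_domains([{"tld": ".com", "type": "global"}], nicks=["example"])
#     # -> [{"domain": "example.com", "type": "global", "tld": ".com"}]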
def is_blackListed(ipv4):
"""Method that checks if an IPv4 is blackslited
There are some providers that resolve always. We have identified these IP
so we have to perform an additional chdeck to confirm that the returned
IPv4 is not a false positive.
Args:
ipv4: The IP to be verified.
Returns:
bool: It returns whether the IP is blacklisted.
"""
return ipv4 in [
"45.79.222.138",
"88.198.29.97",
"91.144.20.76",
"127.0.0.1",
"127.0.0.2",
"127.0.53.53",
"141.8.226.58",
"144.76.162.245",
"173.230.131.38",
"109.95.242.11",
"188.93.95.11",
"173.230.141.80",
"198.74.54.240",
"64.70.19.203",
"199.34.229.100",
"109.95.244.12",
"8.23.224.108",
"203.119.4.201"
]
def pool_function(domain, launch_whois=False):
"""Wrapper for being able to launch all the threads of getPageWrapper.
Args:
domain: We receive the parameters as a dictionary.
```
{
"domain" : ".com",
"type" : "global"
}
```
launch_whois: Whether the whois info will be launched.
Returns:
dict: A dictionary containing the following values:
`{"platform" : str(domain), "status": "DONE", "data": aux}`
"""
try:
if domain["type"] != "other" and launch_whois:
whois_info = get_whois_info(domain["domain"])
print(f"[i] Whois data retrieved from '{general.info(domain['domain'])}'.")
else:
whois_info = None
except Exception:
# If something happened... Log the answer
whois_info = None
print(general.warning(f"[!] Something happened when running whois of '{domain['domain']}'."))
try:
aux = {}
aux["type"] = "com.i3visio.Result"
aux["value"] = "Domain Info - " + domain["domain"]
if whois_info:
aux["attributes"] = whois_info
else:
aux["attributes"] = []
# Performing whois info and adding if necessary
tmp = {}
tmp["type"] = "com.i3visio.Domain"
tmp["value"] = domain["domain"]
tmp["attributes"] = []
aux["attributes"].append(tmp)
tmp = {}
tmp["type"] = "com.i3visio.Domain.TLD.Type"
tmp["value"] = domain["type"]
tmp["attributes"] = []
aux["attributes"].append(tmp)
ipv4 = socket.gethostbyname(domain["domain"])
# Check if this ipv4 normally throws false positives
if is_blackListed(ipv4) and not whois_info:
return {"platform": str(domain), "status": "ERROR", "data": {}}
#If we arrive here... The domain resolves so we add the info:
tmp = {}
tmp["type"] = "com.i3visio.IPv4"
tmp["value"] = ipv4
tmp["attributes"] = []
aux["attributes"].append(tmp)
return {"platform" : str(domain), "status": "DONE", "data": aux}
except Exception:
if whois_info:
return {"platform" : str(domain), "status": "DONE", "data": aux}
return {"platform" : str(domain), "status": "ERROR", "data": {}}
def perform_search(domains=[], nThreads=16, launch_whois=False):
"""Method to perform the mail verification process
Args:
domains: List of domains to check.
nThreads: Number of threads to use.
launch_whois: Sets if whois queries will be launched.
Returns:
list: A list containing the results as i3visio entities.
"""
results = []
# Returning None if no valid domain has been returned
if len(domains) == 0:
return results
# If the process is executed by the current app, we use the Processes. It is faster than pools.
if nThreads <= 0 or nThreads > len(domains):
nThreads = len(domains)
# Launching the Pool
# ------------------
    # Example taken from: https://stackoverflow.com/questions/11312525/catch-ctrlc-sigint-and-exit-multiprocesses-gracefully-in-python
try:
original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
pool = Pool(nThreads)
signal.signal(signal.SIGINT, original_sigint_handler)
except ValueError:
# To avoid: ValueError: signal only works in main thread
pool = Pool(nThreads)
pool_results = []
try:
def log_result(result):
# This is called whenever foo_pool(i) returns a result.
# result_list is modified only by the main process, not the pool workers.
pool_results.append(result)
for d in domains:
# We need to create all the arguments that will be needed
parameters = (d, launch_whois,)
pool.apply_async(pool_function, args=parameters, callback=log_result)
# Waiting for results to be finished
while len(pool_results) < len(domains):
pass
# Closing normal termination
pool.close()
except KeyboardInterrupt:
print(general.warning("\nProcess manually stopped by the user. Terminating workers.\n"))
pool.terminate()
print(general.warning("The following domains were not processed:"))
pending_tld = ""
        for dom in domains:
            for processed_domain in pool_results:
                if str(dom) == processed_domain["platform"]:
                    break
            else:
                # Only report domains that never produced a result
                print(general.warning(f"\t- {dom['domain']}"))
                pending_tld += f" {dom['tld']}"
print(general.warning("\n[!] If you want to relaunch the app with the remaining domains, you can always run the command with: "))
print(general.warning(f"\t domainfy ... -t none -u {pending_tld}"))
print(general.warning("\n[!] Otherwise, if you prefer to avoid these platforms in future searches, you can manually avoid them using: "))
print(general.warning(f"\t domainfy ... -x {pending_tld}"))
pool.join()
# Processing the results
# ----------------------
for ser_array in pool_results:
data = ser_array["data"]
# We need to recover the results and check if they are not an empty json or None
if data is not None and data != {}:
results.append(data)
return results
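# Hedged usage sketch (not part of the original module): candidates are built
# with create_domains() above; the nick "example" is hypothetical.
#
#     candidates = create_domains([{"tld": ".com", "type": "global"}], nicks=["example"])
#     entities = perform_search(domains=candidates, nThreads=4, launch_whois=False)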
def get_parser():
"""Defines the argument parser
Returns:
argparse.ArgumentParser.
"""
DEFAULT_VALUES = configuration.get_configuration_values_for("domainfy")
# Capturing errors just in case the option is not found in the configuration
try:
exclude_list = [DEFAULT_VALUES["exclude_platforms"]]
except Exception:
exclude_list = []
    parser = argparse.ArgumentParser(description='domainfy - Checking the existence of domains that resolve to an IP address.', prog='domainfy', epilog='Check the README.md file for further details on the usage of this program or follow us on Twitter in <http://twitter.com/i3visio>.', add_help=False, conflict_handler='resolve')
parser._optionals.title = "Input options (one required)"
# Adding the main options
group_main_options = parser.add_mutually_exclusive_group(required=True)
group_main_options.add_argument('-n', '--nicks', metavar='<nicks>', nargs='+', action='store', help='the list of nicks to be checked in the domains selected.')
group_main_options.add_argument('-N', '--nicks_file', metavar='<nicks_file>', action='store', help='the file with the list of nicks to be checked in the domains selected.')
    group_main_options.add_argument('--license', required=False, action='store_true', default=False, help='shows the GPLv3+ license and exits.')
# Configuring the processing options
    group_processing = parser.add_argument_group('Processing arguments', 'Configuring the way in which domainfy will process the identified domains.')
    group_processing.add_argument('-e', '--extension', metavar='<sum_ext>', nargs='+', choices=['csv', 'gml', 'json', 'ods', 'png', 'txt', 'xls', 'xlsx' ], required=False, default=DEFAULT_VALUES.get("extension", ["csv"]), action='store', help='output extension for the summary files. Default: csv.')
    group_processing.add_argument('-o', '--output-folder', metavar='<path_to_output_folder>', required=False, default=DEFAULT_VALUES.get("output_folder", "."), action='store', help='output folder for the generated documents. If the path does not exist, domainfy will try to create it; if this argument is not provided, domainfy will NOT write down any data. Check permissions if something goes wrong.')
group_processing.add_argument('-t', '--tlds', metavar='<tld_type>', nargs='+', choices=["all", "none"] + list(TLD.keys()), action='store', help='list of TLD types where the nick will be looked for.', required=False, default=DEFAULT_VALUES.get("tlds", ["global"]))
group_processing.add_argument('-u', '--user-defined', metavar='<new_tld>', nargs='+', action='store', help='additional TLD that will be searched.', required=False, default=DEFAULT_VALUES.get("user_defined", []))
group_processing.add_argument('-x', '--exclude', metavar='<domain>', nargs='+', required=False, default=exclude_list, action='store', help="select the domains to be avoided. The format should include the initial '.'.")
    group_processing.add_argument('-F', '--file-header', metavar='<alternative_header_file>', required=False, default="profiles", action='store', help='header for the output filenames to be generated. If none is provided, the following will be used: profiles.<extension>.' )
    group_processing.add_argument('-T', '--threads', metavar='<num_threads>', required=False, action='store', default=16, type=int, help='number of threads to be used (default: 16). If 0, the maximum number possible will be used, which may make the system unstable.')
group_processing.add_argument('--quiet', required=False, action='store_true', default=False, help='tells the program not to show anything.')
group_processing.add_argument('--whois', required=False, action='store_true', default=False, help='tells the program to launch whois queries.')
# About options
group_about = parser.add_argument_group('About arguments', 'Showing additional information about this program.')
    group_about.add_argument('-h', '--help', action='help', help='shows this help and exits.')
    group_about.add_argument('--version', action='version', version='[%(prog)s] OSRFramework ' + osrframework.__version__, help='shows the version of the program and exits.')
return parser
def main(params=None):
"""Main function to launch phonefy
The function is created in this way so as to let other applications make
use of the full configuration capabilities of the application. The
parameters received are used as parsed by this modules `get_parser()`.
Args:
params: A list with the parameters as grabbed by the terminal. It is
None when this is called by an entry_point. If it is called by osrf
the data is already parsed.
Returns:
list: Returns a list with i3visio entities.
"""
    if params is None or isinstance(params, list):
parser = get_parser()
args = parser.parse_args(params)
else:
args = params
results = []
if not args.quiet:
print(general.title(banner.text))
saying_hello = f"""
Domainfy | Copyright (C) Yaiza Rubio & Félix Brezo (i3visio) 2014-2021
This program comes with ABSOLUTELY NO WARRANTY. This is free software, and you
are welcome to redistribute it under certain conditions. For additional info,
visit <{general.LICENSE_URL}>.
"""
print(general.info(saying_hello))
if args.license:
general.showLicense()
else:
# Processing the options returned to remove the "all" option
tlds = []
if "all" in args.tlds:
for type_tld in TLD.keys():
for tld in TLD[type_tld]:
if tld not in args.exclude:
tlds.append({"tld": tld, "type": type_tld})
elif "none" in args.tlds:
pass
else:
for type_tld in TLD.keys():
if type_tld in args.tlds:
for tld in TLD[type_tld]:
if tld not in args.exclude:
tlds.append({"tld": tld, "type": type_tld})
for new in args.user_defined:
if new not in args.exclude:
if new[0] == ".":
tlds.append({"tld": new, "type": "user_defined"})
else:
tlds.append({"tld": "." + new, "type": "user_defined"})
if args.nicks:
domains = create_domains(tlds, nicks=args.nicks)
else:
# nicks_file
domains = create_domains(tlds, nicks_file=args.nicks_file)
# Showing the execution time...
if not args.quiet:
startTime = dt.datetime.now()
print(f"{startTime}\tTrying to get information about {general.emphasis(str(len(domains)))} domain(s)…\n")
if len(domains) > 200:
print(""" Note that a full '-t all' search may take around 3.5 mins. If that's too
long for you, try narrowing the search using '-t cc' or similar arguments.
Otherwise, just wait and keep calm!
""")
print(general.emphasis("\tPress <Ctrl + C> to stop...\n"))
# Perform searches, using different Threads
results = perform_search(domains, args.threads, args.whois)
# Trying to store the information recovered
if args.output_folder is not None:
if not os.path.exists(args.output_folder):
os.makedirs(args.output_folder)
# Grabbing the results
file_header = os.path.join(args.output_folder, args.file_header)
for ext in args.extension:
# Generating output files
general.export_usufy(results, ext, file_header)
# Showing the information gathered if requested
if not args.quiet:
now = dt.datetime.now()
print(f"\n{now}\t{general.success(len(results))} results obtained:\n")
try:
print(general.success(general.osrf_to_text_export(results)))
except Exception:
print(general.warning("\nSomething happened when exporting the results. The Json will be shown instead:\n"))
print(general.warning(json.dumps(results, indent=2)))
now = dt.datetime.now()
print(f"\n{now}\tYou can find all the information collected in the following files:")
for ext in args.extension:
# Showing the output files
print(f"\t{general.emphasis(file_header + '.' + ext)}")
# Showing the execution time...
if not args.quiet:
# Showing the execution time...
endTime = dt.datetime.now()
print("\n{}\tFinishing execution...\n".format(endTime))
print("Total time used:\t" + general.emphasis(str(endTime-startTime)))
print("Average seconds/query:\t" + general.emphasis(str((endTime-startTime).total_seconds()/len(domains))) +" seconds\n")
# Urging users to place an issue on Github...
print(banner.footer)
if params:
return results
if __name__ == "__main__":
main(sys.argv[1:])
|
i3visio/osrframework
|
osrframework/domainfy.py
|
Python
|
agpl-3.0
| 22,372
|
[
"VisIt"
] |
d21593562037fda55d4f4f786179c3ef833a7d92fa75725a58693e7a11a8321b
|
#!/usr/bin/env python
'''
Hartree-Fock/DFT with k-points sampling for all-electron calculations
GDF (Gaussian density fitting), MDF (mixed density fitting), RSGDF
(range-separated Gaussian density fitting), or RS-JK builder
can be used in all electron calculations. They are more efficient than the
default SCF JK builder.
'''
import numpy
from pyscf.pbc import gto, scf, dft
cell = gto.M(
a = numpy.eye(3)*3.5668,
atom = '''C 0. 0. 0.
C 0.8917 0.8917 0.8917
C 1.7834 1.7834 0.
C 2.6751 2.6751 0.8917
C 1.7834 0. 1.7834
C 2.6751 0.8917 2.6751
C 0. 1.7834 1.7834
C 0.8917 2.6751 2.6751''',
basis = '6-31g',
verbose = 4,
)
nk = [4,4,4]  # 4 k-points for each axis, 4^3 = 64 kpts in total
kpts = cell.make_kpts(nk)
#
# Mixed density fitting
#
kmf = scf.KRHF(cell, kpts).mix_density_fit()
# In the MDF scheme, the default mesh for PWs can be reduced to lower the cost.
# The default mesh is a very dense grid that is generated automatically from the
# AO basis; such a dense grid is often unnecessary for the MDF method.
kmf.with_df.mesh = [10,10,10]
kmf.kernel()
#
# Density fitting
#
kmf = dft.KRKS(cell, kpts).density_fit(auxbasis='weigend')
kmf.xc = 'bp86'
kmf.kernel()
#
# Range-separated density fitting (RSDF)
# RSDF uses the same amount of memory & disk as GDF and achieves a similar
# accuracy as GDF but is often 5~10x faster than GDF in the DF initialization
# step. The following run should give an energy very close to the one above.
# see '35-range_separated_density_fitting.py' for more details of RSDF.
#
kmf = dft.KRKS(cell, kpts).rs_density_fit(auxbasis='weigend')
kmf.xc = 'bp86'
kmf.kernel()
#
# RS-JK builder is efficient for large number of k-points
#
kmf = scf.KRHF(cell, kpts).jk_method('RS')
kmf.kernel()
#
# Second order SCF solver can be used in the PBC SCF code the same way in the
# molecular calculation. Note second order SCF algorithm does not support
# smearing method.
#
mf = scf.KRHF(cell, kpts).density_fit()
mf = mf.newton()
mf.kernel()
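#
# For reference, plain GDF (the Gaussian density fitting scheme named in the module
# docstring); a minimal sketch that reuses the same cell and k-points as above.
#
kmf = scf.KRHF(cell, kpts).density_fit(auxbasis='weigend')
kmf.kernel()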
|
sunqm/pyscf
|
examples/pbc/21-k_points_all_electron_scf.py
|
Python
|
apache-2.0
| 2,174
|
[
"Gaussian",
"PySCF"
] |
b26b9e5ec72427b46fa89fd2cae2db95fa9a7fa5ea23a7a856f67da7cb1eb3b1
|
import numpy as np
import matplotlib.pyplot as plt
from numpy import atleast_2d as twod
################################################################################
## PLOTTING FUNCTIONS #########################################################
################################################################################
def plotClassify2D(learner, X, Y, pre=lambda x: x, axis=None, nGrid=128, **kwargs):
"""
Plot data and classifier outputs on two-dimensional data.
    This function plots the data (X,Y) and learner.predict(X)
    together. The learner is evaluated on a dense grid
    covering the data X, to show its decision boundary.
Parameters
----------
learner : learner object
A trained learner object that inherits from one of
the 'Classify' or 'Regressor' base classes.
X : numpy array
N x M array of data; N = number of data, M = dimension
(number of features) of data.
Y : numpy array
        1 x N array containing labels corresponding to data points
in X.
pre : function object (optional)
Function that is applied to X before prediction.
axis : a matplotlib axis / plottable object (optional)
nGrid : density of 2D grid points (default 128)
"""
if twod(X).shape[1] != 2:
raise ValueError('plotClassify2D: function can only be called using two-dimensional data (features)')
# TODO: Clean up code
    if axis is None: axis = plt
axis.plot( X[:,0],X[:,1], 'k.', visible=False )
# TODO: can probably replace with final dot plot and use transparency for image (?)
ax = axis.axis()
xticks = np.linspace(ax[0],ax[1],nGrid)
yticks = np.linspace(ax[2],ax[3],nGrid)
grid = np.meshgrid( xticks, yticks )
XGrid = np.column_stack( (grid[0].flatten(), grid[1].flatten()) )
if learner is not None:
YGrid = learner.predict( pre(XGrid) )
#axis.contourf( xticks,yticks,YGrid.reshape( (len(xticks),len(yticks)) ), nClasses )
axis.imshow( YGrid.reshape( (len(xticks),len(yticks)) ), extent=ax, interpolation='nearest',origin='lower',alpha=0.5, aspect='auto' )
cmap = plt.cm.get_cmap()
# TODO: if Soft: predictSoft; get colors for each class from cmap; blend pred with colors & show
#
classes = np.unique(Y)
cvals = (classes - min(classes))/(max(classes)-min(classes)+1e-100)
for i,c in enumerate(classes):
axis.plot( X[Y==c,0],X[Y==c,1], 'ko', color=cmap(cvals[i]), **kwargs )
def histy(X,Y,axis=None,**kwargs):
"""
Plot a histogram (using matplotlib.hist) with multiple classes of data
Any additional arguments are passed directly into hist()
Each class of data are plotted as a different color
To specify specific histogram colors, use e.g. facecolor={0:'blue',1:'green',...}
so that facecolor[c] is the color for class c
Related but slightly different appearance to e.g.
matplotlib.hist( [X[Y==c] for c in np.unique(Y)] , histtype='barstacked' )
"""
    if axis is None: axis = plt
yvals = np.unique(Y)
nil, bin_edges = np.histogram(X, **kwargs)
C,H = len(yvals),len(nil)
hist = np.zeros( shape=(C,H) )
cmap = plt.cm.get_cmap()
cvals = (yvals - min(yvals))/(max(yvals)-min(yvals)+1e-100)
widthFrac = .25+.75/(1.2+2*np.log10(len(yvals)))
for i,c in enumerate(yvals):
histc,nil = np.histogram(X[Y==c],bins=bin_edges)
hist[i,:] = histc
    for j in range(H):
for i in np.argsort(hist[:,j])[::-1]:
delta = bin_edges[j+1]-bin_edges[j]
axis.bar(bin_edges[j]+delta/2*i/C*widthFrac,hist[i,j],width=delta*widthFrac,color=cmap(cvals[i]))
def plotPairs(X,Y=None,**kwargs):
"""
Plot all pairs of features in a grid
Diagonal entries are histograms of each feature
Off-diagonal are 2D scatterplots of pairs of features
"""
m,n = X.shape
if Y is None: Y = np.ones( (m,) )
fig,ax = plt.subplots(n,n)
for i in range(n):
for j in range(n):
if i == j:
histy(X[:,i],Y,axis=ax[j,i])
else:
                plotClassify2D(None,X[:,[i,j]],Y,axis=ax[j,i])
def plotGauss2D(mu,cov,*args,**kwargs):
"""
Plot an ellipsoid indicating (one std deviation of) a 2D Gaussian distribution
All additional arguments are passed into plot(.)
"""
from scipy.linalg import sqrtm
theta = np.linspace(0,2*np.pi,50)
circle = np.array([np.sin(theta),np.cos(theta)])
ell = sqrtm(cov).dot(circle)
ell += twod(mu).T
plt.plot( mu[0],mu[1], 'x', ell[0,:],ell[1,:], **kwargs)
# TODO: plotSoftClassify2D
# TODO: plotRegress1D
################################################################################
################################################################################
################################################################################
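# Minimal usage sketch for the helpers above (synthetic data only; assumes scipy and
# a working matplotlib backend are available).
if __name__ == "__main__":
    Xdemo = np.random.randn(300)
    Ydemo = np.random.randint(0, 2, 300)
    histy(Xdemo, Ydemo, bins=20)         # per-class stacked-style histogram
    plotGauss2D(np.zeros(2), np.eye(2))  # one-standard-deviation ellipse at the origin
    plt.show()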
|
sameersingh/ml-discussions
|
week3/mltools/plot.py
|
Python
|
apache-2.0
| 4,914
|
[
"Gaussian"
] |
300450089e4af615768ae315d3177d1ec182b1ef12bd0108b606500c9a5f5404
|
import numpy as np
from astropy import units as u
import pyspeckit
xaxis = np.linspace(-50,150,100.) * u.km/u.s
sigma = 10. * u.km/u.s
center = 50. * u.km/u.s
synth_data = np.exp(-(xaxis-center)**2/(sigma**2 * 2.))
# Add noise
stddev = 0.1
noise = np.random.randn(xaxis.size)*stddev
error = stddev*np.ones_like(synth_data)
data = noise+synth_data
# this will give a "blank header" warning, which is fine
sp = pyspeckit.Spectrum(data=data, error=error, xarr=xaxis,
unit=u.erg/u.s/u.cm**2/u.AA)
sp.plotter()
sp.plotter.savefig('basic_plot_example.png')
# Fit with automatic guesses
sp.specfit(fittype='gaussian')
# (this will produce a plot overlay showing the fit curve and values)
sp.plotter.savefig('basic_plot_example_withfit.png')
# Redo the overlay with no annotation
# remove both the legend and the model overlay
sp.specfit.clear()
# then re-plot the model without an annotation (legend)
sp.specfit.plot_fit(annotate=False)
sp.plotter.savefig('basic_plot_example_withfit_no_annotation.png')
# overlay another spectrum
# We use the 'synthetic' spectrum with no noise, then shift it by 10 km/s
sp2 = pyspeckit.Spectrum(data=synth_data, error=None, xarr=xaxis+10*u.km/u.s,
unit=u.erg/u.s/u.cm**2/u.AA)
# again, remove the overlaid model fit
sp.specfit.clear()
# to overplot, you need to tell the plotter which matplotlib axis to use and
# tell it not to clear the plot first
sp2.plotter(axis=sp.plotter.axis,
clear=False,
color='g')
# sp2.plotter and sp.plotter can both be used here (they refer to the same axis
# and figure now)
sp.plotter.savefig('basic_plot_example_with_second_spectrum_overlaid_in_green.png')
# the plot window will follow the last plotted spectrum's limits by default;
# that can be overridden with the xmin/xmax keywords
sp2.plotter(axis=sp.plotter.axis,
xmin=-100, xmax=200,
ymin=-0.5, ymax=1.5,
clear=False,
color='g')
sp.plotter.savefig('basic_plot_example_with_second_spectrum_overlaid_in_green_wider_limits.png')
# you can also offset the spectra and set different plot properties
# this time, we need to clear the axis first, then do a fresh overlay
# fresh plot
sp.plotter(clear=True)
# overlay, shifted down by 0.2 in y and with a wider linewidth
sp2.plotter(axis=sp.plotter.axis,
offset=-0.2,
clear=False,
color='r',
linewidth=2,
alpha=0.5,
)
# you can also modify the axis properties directly
sp.plotter.axis.set_ylim(-0.25, 1.1)
sp2.plotter.savefig('basic_plot_example_with_second_spectrum_offset_overlaid_in_red.png')
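# For reference, the fit can also be seeded with explicit initial guesses
# (amplitude, centre, width) instead of the automatic guesses used above; the
# values below simply restate the parameters of the synthetic line.
sp.specfit(fittype='gaussian', guesses=[1.0, 50.0, 10.0])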
|
low-sky/pyspeckit
|
docs/basic_plot.py
|
Python
|
mit
| 2,651
|
[
"Gaussian"
] |
617f6d3fa6db08ce6e0e8e7b4d2f262454ab8f5ada4821be98da8042853a3d5b
|
# -*- coding: utf-8 -*-
import numpy as np
import datetime
import pytest
import pysteps
from pysteps import cascade, blending
steps_arg_names = (
"n_models",
"n_timesteps",
"n_ens_members",
"n_cascade_levels",
"mask_method",
"probmatching_method",
"blend_nwp_members",
"weights_method",
"decomposed_nwp",
"expected_n_ens_members",
)
steps_arg_values = [
(1, 3, 4, 8, None, None, False, "spn", True, 4),
(1, 3, 4, 8, "obs", None, False, "spn", True, 4),
(1, 3, 4, 8, "incremental", None, False, "spn", True, 4),
(1, 3, 4, 8, None, "mean", False, "spn", True, 4),
(1, 3, 4, 8, None, "cdf", False, "spn", True, 4),
(1, 3, 4, 8, "incremental", "cdf", False, "spn", True, 4),
(1, 3, 4, 6, "incremental", "cdf", False, "bps", True, 4),
(1, 3, 4, 6, "incremental", "cdf", False, "bps", False, 4),
(1, 3, 4, 9, "incremental", "cdf", False, "spn", True, 4),
(2, 3, 10, 8, "incremental", "cdf", False, "spn", True, 10),
(5, 3, 4, 8, "incremental", "cdf", False, "spn", True, 5),
(1, 10, 1, 8, "incremental", "cdf", False, "spn", True, 1),
(5, 3, 2, 8, "incremental", "cdf", True, "spn", True, 2),
]
@pytest.mark.parametrize(steps_arg_names, steps_arg_values)
def test_steps_blending(
n_models,
n_timesteps,
n_ens_members,
n_cascade_levels,
mask_method,
probmatching_method,
blend_nwp_members,
weights_method,
decomposed_nwp,
expected_n_ens_members,
):
pytest.importorskip("cv2")
###
# The input data
###
# Initialise dummy NWP data
nwp_precip = np.zeros((n_models, n_timesteps + 1, 200, 200))
for n_model in range(n_models):
for i in range(nwp_precip.shape[1]):
nwp_precip[n_model, i, 30:185, 30 + 1 * (i + 1) * n_model] = 0.1
nwp_precip[n_model, i, 30:185, 31 + 1 * (i + 1) * n_model] = 0.1
nwp_precip[n_model, i, 30:185, 32 + 1 * (i + 1) * n_model] = 1.0
nwp_precip[n_model, i, 30:185, 33 + 1 * (i + 1) * n_model] = 5.0
nwp_precip[n_model, i, 30:185, 34 + 1 * (i + 1) * n_model] = 5.0
nwp_precip[n_model, i, 30:185, 35 + 1 * (i + 1) * n_model] = 4.5
nwp_precip[n_model, i, 30:185, 36 + 1 * (i + 1) * n_model] = 4.5
nwp_precip[n_model, i, 30:185, 37 + 1 * (i + 1) * n_model] = 4.0
nwp_precip[n_model, i, 30:185, 38 + 1 * (i + 1) * n_model] = 2.0
nwp_precip[n_model, i, 30:185, 39 + 1 * (i + 1) * n_model] = 1.0
nwp_precip[n_model, i, 30:185, 40 + 1 * (i + 1) * n_model] = 0.5
nwp_precip[n_model, i, 30:185, 41 + 1 * (i + 1) * n_model] = 0.1
# Define dummy nowcast input data
radar_precip = np.zeros((3, 200, 200))
for i in range(2):
radar_precip[i, 5:150, 30 + 1 * i] = 0.1
radar_precip[i, 5:150, 31 + 1 * i] = 0.5
radar_precip[i, 5:150, 32 + 1 * i] = 0.5
radar_precip[i, 5:150, 33 + 1 * i] = 5.0
radar_precip[i, 5:150, 34 + 1 * i] = 5.0
radar_precip[i, 5:150, 35 + 1 * i] = 4.5
radar_precip[i, 5:150, 36 + 1 * i] = 4.5
radar_precip[i, 5:150, 37 + 1 * i] = 4.0
radar_precip[i, 5:150, 38 + 1 * i] = 1.0
radar_precip[i, 5:150, 39 + 1 * i] = 0.5
radar_precip[i, 5:150, 40 + 1 * i] = 0.5
radar_precip[i, 5:150, 41 + 1 * i] = 0.1
radar_precip[2, 30:155, 30 + 1 * 2] = 0.1
radar_precip[2, 30:155, 31 + 1 * 2] = 0.1
radar_precip[2, 30:155, 32 + 1 * 2] = 1.0
radar_precip[2, 30:155, 33 + 1 * 2] = 5.0
radar_precip[2, 30:155, 34 + 1 * 2] = 5.0
radar_precip[2, 30:155, 35 + 1 * 2] = 4.5
radar_precip[2, 30:155, 36 + 1 * 2] = 4.5
radar_precip[2, 30:155, 37 + 1 * 2] = 4.0
radar_precip[2, 30:155, 38 + 1 * 2] = 2.0
radar_precip[2, 30:155, 39 + 1 * 2] = 1.0
radar_precip[2, 30:155, 40 + 1 * 3] = 0.5
radar_precip[2, 30:155, 41 + 1 * 3] = 0.1
metadata = dict()
metadata["unit"] = "mm"
metadata["transformation"] = "dB"
metadata["accutime"] = 5.0
metadata["transform"] = "dB"
metadata["zerovalue"] = 0.0
metadata["threshold"] = 0.01
metadata["zr_a"] = 200.0
metadata["zr_b"] = 1.6
# Also set the outdir_path and clim_kwargs
outdir_path_skill = "./tmp/"
clim_kwargs = dict({"n_models": n_models, "window_length": 30})
###
# First threshold the data and convert it to dBR
###
# threshold the data
radar_precip[radar_precip < metadata["threshold"]] = 0.0
nwp_precip[nwp_precip < metadata["threshold"]] = 0.0
# convert the data
converter = pysteps.utils.get_method("mm/h")
radar_precip, _ = converter(radar_precip, metadata)
nwp_precip, metadata = converter(nwp_precip, metadata)
# transform the data
transformer = pysteps.utils.get_method(metadata["transformation"])
radar_precip, _ = transformer(radar_precip, metadata)
nwp_precip, metadata = transformer(nwp_precip, metadata)
# set NaN equal to zero
radar_precip[~np.isfinite(radar_precip)] = metadata["zerovalue"]
nwp_precip[~np.isfinite(nwp_precip)] = metadata["zerovalue"]
assert (
np.any(~np.isfinite(radar_precip)) == False
), "There are still infinite values in the input radar data"
assert (
np.any(~np.isfinite(nwp_precip)) == False
), "There are still infinite values in the NWP data"
###
# Decompose the R_NWP data
###
# Initial decomposition settings
decomp_method, _ = cascade.get_method("fft")
bandpass_filter_method = "gaussian"
precip_shape = radar_precip.shape[1:]
filter_method = cascade.get_method(bandpass_filter_method)
bp_filter = filter_method(precip_shape, n_cascade_levels)
# If we only use one model:
if nwp_precip.ndim == 3:
nwp_precip = nwp_precip[None, :]
if decomposed_nwp:
nwp_precip_decomp = []
# Loop through the n_models
for i in range(nwp_precip.shape[0]):
R_d_models_ = []
# Loop through the time steps
for j in range(nwp_precip.shape[1]):
R_ = decomp_method(
field=nwp_precip[i, j, :, :],
bp_filter=bp_filter,
normalize=True,
compute_stats=True,
compact_output=True,
)
R_d_models_.append(R_)
nwp_precip_decomp.append(R_d_models_)
nwp_precip_decomp = np.array(nwp_precip_decomp)
assert nwp_precip_decomp.ndim == 2, "Wrong number of dimensions in R_d_models"
else:
nwp_precip_decomp = nwp_precip.copy()
assert nwp_precip_decomp.ndim == 4, "Wrong number of dimensions in R_d_models"
###
# Determine the velocity fields
###
oflow_method = pysteps.motion.get_method("lucaskanade")
radar_velocity = oflow_method(radar_precip)
nwp_velocity = []
# Loop through the models
for n_model in range(nwp_precip.shape[0]):
# Loop through the timesteps. We need two images to construct a motion
# field, so we can start from timestep 1. Timestep 0 will be the same
        # as timestep 1.
_V_NWP_ = []
for t in range(1, nwp_precip.shape[1]):
V_NWP_ = oflow_method(nwp_precip[n_model, t - 1 : t + 1, :])
_V_NWP_.append(V_NWP_)
V_NWP_ = None
_V_NWP_ = np.insert(_V_NWP_, 0, _V_NWP_[0], axis=0)
nwp_velocity.append(_V_NWP_)
nwp_velocity = np.stack(nwp_velocity)
assert nwp_velocity.ndim == 5, "nwp_velocity must be a five-dimensional array"
###
# The nowcasting
###
precip_forecast = blending.steps.forecast(
precip=radar_precip,
precip_models=nwp_precip_decomp,
velocity=radar_velocity,
velocity_models=nwp_velocity,
timesteps=n_timesteps,
timestep=5.0,
issuetime=datetime.datetime.strptime("202112012355", "%Y%m%d%H%M"),
n_ens_members=n_ens_members,
n_cascade_levels=n_cascade_levels,
blend_nwp_members=blend_nwp_members,
precip_thr=metadata["threshold"],
kmperpixel=1.0,
extrap_method="semilagrangian",
decomp_method="fft",
bandpass_filter_method="gaussian",
noise_method="nonparametric",
noise_stddev_adj="auto",
ar_order=2,
vel_pert_method=None,
weights_method=weights_method,
conditional=False,
probmatching_method=probmatching_method,
mask_method=mask_method,
callback=None,
return_output=True,
seed=None,
num_workers=1,
fft_method="numpy",
domain="spatial",
outdir_path_skill=outdir_path_skill,
extrap_kwargs=None,
filter_kwargs=None,
noise_kwargs=None,
vel_pert_kwargs=None,
clim_kwargs=clim_kwargs,
mask_kwargs=None,
measure_time=False,
)
assert precip_forecast.ndim == 4, "Wrong amount of dimensions in forecast output"
assert (
precip_forecast.shape[0] == expected_n_ens_members
), "Wrong amount of output ensemble members in forecast output"
assert (
precip_forecast.shape[1] == n_timesteps
), "Wrong amount of output time steps in forecast output"
# Transform the data back into mm/h
precip_forecast, _ = converter(precip_forecast, metadata)
assert (
precip_forecast.ndim == 4
), "Wrong amount of dimensions in converted forecast output"
assert (
precip_forecast.shape[0] == expected_n_ens_members
), "Wrong amount of output ensemble members in converted forecast output"
assert (
precip_forecast.shape[1] == n_timesteps
), "Wrong amount of output time steps in converted forecast output"
|
pySTEPS/pysteps
|
pysteps/tests/test_blending_steps.py
|
Python
|
bsd-3-clause
| 9,737
|
[
"Gaussian"
] |
7a04152847fde05c14e181ed6f4c484258f969ed3f192016c6028c234979955e
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Test pickling of Iris objects.
"""
# Import iris tests first so that some things can be initialised
# before importing anything else.
import iris.tests as tests # isort:skip
import io
import pickle
import cf_units
import numpy as np
import iris
from iris._lazy_data import as_concrete_data, as_lazy_data
class TestPickle(tests.IrisTest):
def pickle_then_unpickle(self, obj):
"""
Returns a generator of ("pickle protocol number", object) tuples.
"""
for protocol in range(1 + pickle.HIGHEST_PROTOCOL):
bio = io.BytesIO()
pickle.dump(obj, bio, protocol)
# move the bio back to the start and reconstruct
bio.seek(0)
reconstructed_obj = pickle.load(bio)
yield protocol, reconstructed_obj
@staticmethod
def _real_data(cube):
# Get the concrete data of the cube for performing data values
# comparison checks.
return as_concrete_data(cube.core_data())
def assertCubeData(self, cube1, cube2):
self.assertArrayEqual(self._real_data(cube1), self._real_data(cube2))
@tests.skip_data
def test_cube_pickle(self):
cube = iris.load_cube(
tests.get_data_path(("PP", "globClim1", "theta.pp"))
)
self.assertTrue(cube.has_lazy_data())
self.assertCML(
cube, ("cube_io", "pickling", "theta.cml"), checksum=False
)
for p, recon_cube in self.pickle_then_unpickle(cube):
self.assertTrue(recon_cube.has_lazy_data())
self.assertCML(
recon_cube,
("cube_io", "pickling", "theta.cml"),
checksum=False,
)
self.assertCubeData(cube, recon_cube)
@tests.skip_data
def test_cube_with_coord_points(self):
filename = tests.get_data_path(
("NetCDF", "rotated", "xy", "rotPole_landAreaFraction.nc")
)
cube = iris.load_cube(filename)
# Pickle and unpickle. Do not perform any CML tests
# to avoid side effects.
_, recon_cube = next(self.pickle_then_unpickle(cube))
self.assertEqual(recon_cube, cube)
def test_cube_with_deferred_unit_conversion(self):
real_data = np.arange(12.0).reshape((3, 4))
lazy_data = as_lazy_data(real_data)
cube = iris.cube.Cube(lazy_data, units="m")
cube.convert_units("ft")
_, recon_cube = next(self.pickle_then_unpickle(cube))
self.assertEqual(recon_cube, cube)
@tests.skip_data
def test_cubelist_pickle(self):
cubelist = iris.load(
tests.get_data_path(("PP", "COLPEX", "theta_and_orog_subset.pp"))
)
single_cube = cubelist[0]
self.assertCML(cubelist, ("cube_io", "pickling", "cubelist.cml"))
self.assertCML(single_cube, ("cube_io", "pickling", "single_cube.cml"))
for _, reconstructed_cubelist in self.pickle_then_unpickle(cubelist):
self.assertCML(
reconstructed_cubelist, ("cube_io", "pickling", "cubelist.cml")
)
self.assertCML(
reconstructed_cubelist[0],
("cube_io", "pickling", "single_cube.cml"),
)
for cube_orig, cube_reconstruct in zip(
cubelist, reconstructed_cubelist
):
self.assertArrayEqual(cube_orig.data, cube_reconstruct.data)
self.assertEqual(cube_orig, cube_reconstruct)
    def test_pickling_equality_misc(self):
items_to_test = [
cf_units.Unit(
"hours since 2007-01-15 12:06:00",
calendar=cf_units.CALENDAR_STANDARD,
),
cf_units.as_unit("1"),
cf_units.as_unit("meters"),
cf_units.as_unit("no-unit"),
cf_units.as_unit("unknown"),
]
for orig_item in items_to_test:
for protocol, reconst_item in self.pickle_then_unpickle(orig_item):
fail_msg = (
"Items are different after pickling "
"at protocol {}.\nOrig item: {!r}\nNew item: {!r}"
)
fail_msg = fail_msg.format(protocol, orig_item, reconst_item)
self.assertEqual(orig_item, reconst_item, fail_msg)
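# A standalone sketch of the protocol-sweep round-trip idiom used by
# pickle_then_unpickle() above, on plain numpy data (illustrative only; not part
# of the TestPickle suite).
def _roundtrip_sketch():
    data = np.arange(5)
    for protocol in range(1 + pickle.HIGHEST_PROTOCOL):
        bio = io.BytesIO()
        pickle.dump(data, bio, protocol)
        bio.seek(0)
        assert (pickle.load(bio) == data).all()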
if __name__ == "__main__":
tests.main()
|
SciTools/iris
|
lib/iris/tests/test_pickling.py
|
Python
|
lgpl-3.0
| 4,558
|
[
"NetCDF"
] |
86d3eb62fce352d95b5213e6db46a839f933748643390fbfdbc792a92c866205
|
# -*- coding: utf-8 -*-
# (c) 2016 Andreas Motl <andreas.motl@elmyra.de>
import types
import tempfile
from pprint import pprint
from string import Template
from pkg_resources import resource_string
from twisted.web.template import renderElement
from twisted.logger import Logger
from kotori.io.export.html import DygraphsPage
from kotori.io.protocol.util import get_data_uri
from kotori.io.export.util import dataframe_index_to_column, dataframe_wide_to_long_indexed, dataframe_index_and_sort
from kotori.io.export.util import matplotlib_locator_formatter
log = Logger()
class UniversalPlotter(object):
"""
Universal plotter for timeseries data.
Render pandas DataFrame to different timeseries plots.
See also: http://pandas.pydata.org/pandas-docs/stable/cookbook.html#cookbook-plotting
Tabular data:
- CSV
- JSON
- HTML
- Excel (XLSX)
- HDF5
- NetCDF
- DataTables HTML widget
Timeseries plots:
- [PNG] matplotlib
- [PNG] ggplot
- [HTML] dygraphs
- [HTML] Bokeh
- [HTML] Vega/Vincent
"""
def __init__(self, bucket, dataframe):
self.bucket = bucket
self.request = bucket.request
self.dataframe = dataframe
def render(self, format, kind=None, buffer=None):
if format == 'png':
return self.render_png(buffer)
elif format == 'html':
return self.render_html(kind)
elif format == 'json':
return self.render_json(kind)
def render_png(self, buffer):
"""
Render timeseries plots as PNG images.
"""
bucket = self.bucket
import matplotlib.font_manager
matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')
import matplotlib
try:
matplotlib.use('agg')
except:
pass
import matplotlib.pyplot as plt
df = self.dataframe
#df = df.set_index(['time'])
# Compute datetime range boundaries
datetime_min = min(df.time)
datetime_max = max(df.time)
datetime_delta = datetime_max - datetime_min
#xmin = pd.to_datetime('2016-05-01')
#xmax = pd.to_datetime('2016-08-01')
renderer = bucket.tdata.get('renderer', 'matplotlib')
if renderer == 'matplotlib':
# Bring DataFrame into appropriate format
df = dataframe_index_and_sort(df, 'time')
# Propagate non-null values forward or backward, otherwise
# matplotlib would not plot the sparse data frame properly.
# With time series data, using pad/ffill is extremely common so that the “last known value” is available at every time point.
# http://pandas.pydata.org/pandas-docs/stable/missing_data.html#filling-missing-values-fillna
df.fillna(method='pad', inplace=True)
# Make plots of DataFrame using matplotlib / pylab.
# http://matplotlib.org/
# http://pandas.pydata.org/pandas-docs/version/0.13.1/visualization.html
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.plot.html
# https://markthegraph.blogspot.de/2015/05/plotting-time-series-dataframes-in.html
if 'style' in bucket.tdata and bucket.tdata.style:
try:
plt.style.use(bucket.tdata.style)
except Exception:
error_message = u'# Unknown style "{style_name}", available styles: {available}'.format(
style_name=bucket.tdata.style, available=plt.style.available)
log.error(error_message)
return self.request.error_response(bucket, error_message)
# Basic plotting
#df.plot()
#plt.savefig(buffer)
# Advanced plotting
ax = df.plot()
fig = ax.get_figure()
# Figure heading
title = fig.suptitle(bucket.title.human, fontsize=12)
#fig.tight_layout(pad=1.5)
# Axis and tick labels
ax.set_xlabel('Time')
ax.set_ylabel('Value')
ax.tick_params(axis='x', labelsize='smaller')
# Grid and legend
# http://matplotlib.org/users/legend_guide.html
# http://matplotlib.org/examples/pylab_examples/legend_demo3.html
ax.grid(True)
legend_params = dict(ncol=1, loc='center left', bbox_to_anchor=(1, 0.5), fontsize='small', shadow=True, fancybox=True)
legend = ax.legend(**legend_params) # title='Origin'
#ax.legend(**legend_params) # title='Origin'
# Sort list of legend labels
# http://stackoverflow.com/questions/22263807/how-is-order-of-items-in-matplotlib-legend-determined/27512450#27512450
# Axis formatting
#ax.xaxis_date()
#ax.autoscale_view()
# Compute appropriate locator and formatter
locator, formatter = matplotlib_locator_formatter(datetime_delta, span=1)
#ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
# Figure formatting
fig.autofmt_xdate()
# http://stackoverflow.com/questions/10101700/moving-matplotlib-legend-outside-of-the-axis-makes-it-cutoff-by-the-figure-box/10154763#10154763
fig.savefig(buffer, bbox_extra_artists=(title, legend), bbox_inches='tight')
# TODO: Add annotations
"""
# https://stackoverflow.com/questions/11067368/annotate-time-series-plot-in-matplotlib
# https://stackoverflow.com/questions/17891493/annotating-points-from-a-pandas-dataframe-in-matplotlib-plot
import matplotlib.dates as mdates
fig = plot.draw()
ax = fig.axes[0]
ax.annotate('Test', (mdates.date2num(x[1]), y[1]), xytext=(15, 15),
textcoords='offset points', arrowprops=dict(arrowstyle='-|>'))
"""
elif renderer == 'ggplot':
# https://yhat.github.io/ggplot/notebook.html?page=build/docs/examples/Multiple%20Line%20Plot.html
# https://stackoverflow.com/questions/23541497/is-there-a-way-to-plot-a-pandas-series-in-ggplot
# https://stackoverflow.com/questions/24478925/is-it-possible-to-plot-multiline-chart-on-python-ggplot/24479513#24479513
# https://github.com/yhat/ggplot/blob/master/docs/how-to/Building%20Faceted%20(or%20Trellised)%20Plots.ipynb
# https://github.com/yhat/ggplot/blob/master/docs/how-to/Annotating%20Plots%20-%20Titles%20and%20Labels.ipynb
# https://github.com/yhat/ggplot/blob/master/docs/how-to/How%20to%20make%20xkcd%20style%20graphs.ipynb
from ggplot import ggplot, aes, qplot, geom_line, geom_text, ggtitle, stat_smooth, scale_x_date, date_format, date_breaks
from ggplot import theme_538, theme_bw, theme_gray, theme_xkcd
# https://stackoverflow.com/questions/24478925/is-it-possible-to-plot-multiline-chart-on-python-ggplot/24479513#24479513
# https://stackoverflow.com/questions/23541497/is-there-a-way-to-plot-a-pandas-series-in-ggplot
# Convert DataFrame from wide to long format, retaining "time" as visible column
df = dataframe_wide_to_long_indexed(df, 'time')
dataframe_index_to_column(df, 'time')
# Compute appropriate locator and formatter
locator, formatter = matplotlib_locator_formatter(datetime_delta, span=2)
plot = ggplot(df, aes(x='time', y='value', color='variable'))\
+ geom_line()\
+ scale_x_date(limits=(datetime_min, datetime_max), breaks=locator, labels=formatter)\
+ ggtitle(bucket.title.human)
# Axis labels
plot.xlab = 'Time'
plot.ylab = 'Value'
# Labs
#+ stat_smooth(colour='blue', span=0.2) \
#+ geom_text(aes(x='x', y='y'), label='hello world')
#+ scale_x_date(limits=(xmin, xmax), breaks=date_breaks('1 hour'), labels=date_format('%Y-%m-%d\n%H:%M'))
theme_name = bucket.tdata.get('theme')
# TODO: Switching themes will leak some matplotlib/pyplot properties, postpone to future versions
if theme_name:
                if isinstance(theme_name, float):
theme_name = str(int(theme_name))
try:
theme = eval('theme_' + theme_name)
plot += theme()
except Exception:
error_message = u'# Unknown theme "{theme_name}"'.format(theme_name=theme_name)
log.error(error_message)
return self.request.error_response(bucket, error_message)
plot.save(buffer)
# Attempt to reset global matplotlib parameters to get rid of xkcd theme style
"""
import matplotlib as mpl
#mpl.rcParams = mpl.rc_params()
#del mpl.rcParams['path.sketch']
#del mpl.rcParams['path.effects']
#mpl.rcParams = mpl.defaultParams.copy()
#mpl.rcParams.clear()
#mpl.rcdefaults()
#mpl.rcParams = mpl.rcParamsOrig
if 'axes.prop_cycle' in mpl.rcParams:
del mpl.rcParams['axes.prop_cycle']
mpl.rcParams.update({'path.sketch': None, 'path.effects': []})
mpl.rcParams.update(mpl.rc_params())
"""
elif renderer == 'seaborn':
# TODO: We don't do statistical plotting yet.
# https://stanford.edu/~mwaskom/software/seaborn/examples/timeseries_from_dataframe.html
# https://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.tsplot.html
import seaborn as sns
sns.set(style="darkgrid")
#sns.tsplot(data=gammas, time="timepoint", unit="subject", condition="ROI", value="BOLD signal")
#print dir(df)
#df['time'] = pandas.to_datetime(df['time'])
#df = df.set_index(df.time)
pprint(df)
sns.tsplot(data=df, time="time")
#sns.tsplot(data=df)
plt.savefig(buffer)
else:
error_message = u'# Unknown renderer "{renderer_name}"'.format(renderer_name=renderer)
log.error(error_message)
return self.request.error_response(bucket, error_message)
def render_html(self, kind):
"""
Render HTML-based timeseries plots for dygraphs, Bokeh and Vega.
"""
# Variable aliases
bucket = self.bucket
df = self.dataframe
if kind == 'dygraphs':
# http://dygraphs.com/
# Compute data_uri, forward "from" and "to" parameters
data_uri = get_data_uri(bucket, 'data.csv', {'pad': 'true'})
# Render HTML snippet containing dygraphs widget
page = DygraphsPage(data_uri=data_uri, bucket=bucket)
bucket.request.setHeader('Content-Type', 'text/html; charset=utf-8')
return renderElement(bucket.request, page)
elif kind == 'bokeh':
# http://bokeh.pydata.org/
from bokeh.io import save
from bokeh.charts import TimeSeries, vplot
# Propagate non-null values forward or backward, otherwise
# Bokeh would not plot the sparse data frame properly.
# With time series data, using pad/ffill is extremely common so that the “last known value” is available at every time point.
# http://pandas.pydata.org/pandas-docs/stable/missing_data.html#filling-missing-values-fillna
df.fillna(method='pad', inplace=True)
# Plot using matplotlib
# http://bokeh.pydata.org/en/latest/docs/user_guide/compat.html#userguide-compat
# https://github.com/bokeh/bokeh/tree/master/examples/compat/
# https://github.com/bokeh/bokeh/blob/master/examples/compat/pandas_dataframe.py
# https://github.com/bokeh/bokeh/blob/master/examples/compat/ggplot_line.py
#df.plot()
#what = mpl.to_bokeh()
# Plot using Bokeh TimeSeries
# http://bokeh.pydata.org/en/latest/docs/reference/charts.html#timeseries
# http://bokeh.pydata.org/en/0.11.1/docs/user_guide/styling.html#location
linegraph = TimeSeries(df, x='time', title=bucket.title.human, legend="top_left", width=800)
# Plot TimeSeries object
what = vplot(linegraph)
# Render using Bokeh
t = tempfile.NamedTemporaryFile(suffix='.html', delete=False)
save(what, filename=t.name, title=bucket.title.human)
# Forward html to http response
bucket.request.setHeader('Content-Type', 'text/html; charset=utf-8')
return t.read()
elif kind == 'vega':
# https://github.com/wrobstory/vincent
# Compute data_uri, forward "from" and "to" parameters
data_uri = get_data_uri(bucket, 'data.vega.json', {'pad': 'true', 'backfill': 'true'})
template = Template(str(resource_string('kotori.io.export', 'vega_template.html')))
bucket.request.setHeader('Content-Type', 'text/html; charset=utf-8')
response = template.substitute(path=data_uri, title=bucket.title.human)
return response.encode('utf-8')
def render_json(self, kind):
"""
Render JSON chart description for HTML-based timeseries plot Vega, using Vincent.
"""
# Variable aliases
bucket = self.bucket
df = self.dataframe
if kind == 'vega':
# https://github.com/wrobstory/vincent
# https://github.com/wrobstory/vincent/blob/master/examples/line_chart_examples.py
# https://stackoverflow.com/questions/29288914/how-to-get-vincent-to-display-a-pandas-date-time-axis-correctly
# https://wrobstory.github.io/2013/04/pandas-vincent-timeseries.html
from vincent import Line
# Mungle DataFrame into appropriate format
df = dataframe_index_and_sort(df, 'time')
vis = Line(df)
vis.axis_titles(x='Time', y='Value')
vis.legend(title='Origin')
bucket.request.setHeader('Content-Type', 'application/json')
return vis.to_json()
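# Minimal sketch of the pad/ffill step used by the matplotlib and Bokeh branches
# above, shown on a toy frame (illustrative helper only; it is not used by
# UniversalPlotter and assumes pandas is importable here).
def _pad_fill_sketch():
    import pandas as pd
    frame = pd.DataFrame({'value': [1.0, None, None, 4.0]})
    # Propagate the last known value forward, as done before plotting sparse frames.
    frame.fillna(method='pad', inplace=True)
    return frame  # -> 1.0, 1.0, 1.0, 4.0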
|
zerotired/kotori
|
kotori/io/export/plot.py
|
Python
|
agpl-3.0
| 14,575
|
[
"NetCDF"
] |
162197733836d2fa0127d4e71ac2a2c56479ebff691775bc4121a0408c00d546
|
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import inspect
import logging
import time
import uuid
import urllib.parse
# Django
from django.conf import settings
from django.core.cache import cache
from django.db import connection
from django.db.models.fields import FieldDoesNotExist
from django.db.models.fields.related import OneToOneRel
from django.http import QueryDict
from django.shortcuts import get_object_or_404
from django.template.loader import render_to_string
from django.utils.encoding import smart_text
from django.utils.safestring import mark_safe
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import views as auth_views
# Django REST Framework
from rest_framework.exceptions import PermissionDenied, AuthenticationFailed, ParseError, NotAcceptable, UnsupportedMediaType
from rest_framework import generics
from rest_framework.response import Response
from rest_framework import status
from rest_framework import views
from rest_framework.permissions import AllowAny
from rest_framework.renderers import StaticHTMLRenderer, JSONRenderer
from rest_framework.negotiation import DefaultContentNegotiation
# AWX
from awx.api.filters import FieldLookupBackend
from awx.main.models import (
UnifiedJob, UnifiedJobTemplate, User, Role, Credential,
WorkflowJobTemplateNode, WorkflowApprovalTemplate
)
from awx.main.access import access_registry
from awx.main.utils import (
camelcase_to_underscore,
get_search_fields,
getattrd,
get_object_or_400,
decrypt_field,
get_awx_version,
get_licenser,
StubLicense
)
from awx.main.utils.db import get_all_field_names
from awx.main.views import ApiErrorView
from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer, UserSerializer
from awx.api.versioning import URLPathVersioning
from awx.api.metadata import SublistAttachDetatchMetadata, Metadata
__all__ = ['APIView', 'GenericAPIView', 'ListAPIView', 'SimpleListAPIView',
'ListCreateAPIView', 'SubListAPIView', 'SubListCreateAPIView',
'SubListDestroyAPIView',
'SubListCreateAttachDetachAPIView', 'RetrieveAPIView',
'RetrieveUpdateAPIView', 'RetrieveDestroyAPIView',
'RetrieveUpdateDestroyAPIView',
'SubDetailAPIView',
'ResourceAccessList',
'ParentMixin',
'DeleteLastUnattachLabelMixin',
'SubListAttachDetachAPIView',
'CopyAPIView', 'BaseUsersList',]
logger = logging.getLogger('awx.api.generics')
analytics_logger = logging.getLogger('awx.analytics.performance')
class LoggedLoginView(auth_views.LoginView):
def get(self, request, *args, **kwargs):
# The django.auth.contrib login form doesn't perform the content
# negotiation we've come to expect from DRF; add in code to catch
# situations where Accept != text/html (or */*) and reply with
# an HTTP 406
try:
DefaultContentNegotiation().select_renderer(
request,
[StaticHTMLRenderer],
'html'
)
except NotAcceptable:
resp = Response(status=status.HTTP_406_NOT_ACCEPTABLE)
resp.accepted_renderer = StaticHTMLRenderer()
resp.accepted_media_type = 'text/plain'
resp.renderer_context = {}
return resp
return super(LoggedLoginView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
ret = super(LoggedLoginView, self).post(request, *args, **kwargs)
current_user = getattr(request, 'user', None)
if request.user.is_authenticated:
logger.info(smart_text(u"User {} logged in from {}".format(self.request.user.username,request.META.get('REMOTE_ADDR', None))))
ret.set_cookie('userLoggedIn', 'true')
current_user = UserSerializer(self.request.user)
current_user = smart_text(JSONRenderer().render(current_user.data))
current_user = urllib.parse.quote('%s' % current_user, '')
ret.set_cookie('current_user', current_user, secure=settings.SESSION_COOKIE_SECURE or None)
return ret
else:
if 'username' in self.request.POST:
logger.warn(smart_text(u"Login failed for user {} from {}".format(self.request.POST.get('username'),request.META.get('REMOTE_ADDR', None))))
ret.status_code = 401
return ret
class LoggedLogoutView(auth_views.LogoutView):
def dispatch(self, request, *args, **kwargs):
original_user = getattr(request, 'user', None)
ret = super(LoggedLogoutView, self).dispatch(request, *args, **kwargs)
current_user = getattr(request, 'user', None)
ret.set_cookie('userLoggedIn', 'false')
if (not current_user or not getattr(current_user, 'pk', True)) \
and current_user != original_user:
logger.info("User {} logged out.".format(original_user.username))
return ret
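# Minimal sketch of how class-based auth views like the two above are typically
# mounted in a urlconf (illustrative only; the project's real URL configuration
# lives elsewhere):
#
#   from django.urls import path
#   urlpatterns = [
#       path('login/', LoggedLoginView.as_view(template_name='rest_framework/login.html')),
#       path('logout/', LoggedLogoutView.as_view()),
#   ]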
def get_view_description(view, html=False):
'''Wrapper around REST framework get_view_description() to continue
to support our historical div.
'''
desc = views.get_view_description(view, html=html)
if html:
desc = '<div class="description">%s</div>' % desc
return mark_safe(desc)
def get_default_schema():
if settings.SETTINGS_MODULE == 'awx.settings.development':
from awx.api.swagger import AutoSchema
return AutoSchema()
else:
return views.APIView.schema
class APIView(views.APIView):
schema = get_default_schema()
versioning_class = URLPathVersioning
def initialize_request(self, request, *args, **kwargs):
'''
Store the Django REST Framework Request object as an attribute on the
normal Django request, store time the request started.
'''
self.time_started = time.time()
if getattr(settings, 'SQL_DEBUG', False):
self.queries_before = len(connection.queries)
# If there are any custom headers in REMOTE_HOST_HEADERS, make sure
# they respect the allowed proxy list
if all([
settings.PROXY_IP_ALLOWED_LIST,
request.environ.get('REMOTE_ADDR') not in settings.PROXY_IP_ALLOWED_LIST,
request.environ.get('REMOTE_HOST') not in settings.PROXY_IP_ALLOWED_LIST
]):
for custom_header in settings.REMOTE_HOST_HEADERS:
if custom_header.startswith('HTTP_'):
request.environ.pop(custom_header, None)
drf_request = super(APIView, self).initialize_request(request, *args, **kwargs)
request.drf_request = drf_request
try:
request.drf_request_user = getattr(drf_request, 'user', False)
except AuthenticationFailed:
request.drf_request_user = None
except (PermissionDenied, ParseError) as exc:
request.drf_request_user = None
self.__init_request_error__ = exc
except UnsupportedMediaType as exc:
exc.detail = _('You did not use correct Content-Type in your HTTP request. '
'If you are using our REST API, the Content-Type must be application/json')
self.__init_request_error__ = exc
return drf_request
def finalize_response(self, request, response, *args, **kwargs):
'''
Log warning for 400 requests. Add header with elapsed time.
'''
#
# If the URL was rewritten, and we get a 404, we should entirely
# replace the view in the request context with an ApiErrorView()
# Without this change, there will be subtle differences in the BrowseableAPIRenderer
#
# These differences could provide contextual clues which would allow
# anonymous users to determine if usernames were valid or not
# (e.g., if an anonymous user visited `/api/v2/users/valid/`, and got a 404,
# but also saw that the page heading said "User Detail", they might notice
# that's a difference in behavior from a request to `/api/v2/users/not-valid/`, which
# would show a page header of "Not Found"). Changing the view here
# guarantees that the rendered response will look exactly like the response
# when you visit a URL that has no matching URL paths in `awx.api.urls`.
#
if response.status_code == 404 and 'awx.named_url_rewritten' in request.environ:
self.headers.pop('Allow', None)
response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
view = ApiErrorView()
setattr(view, 'request', request)
response.renderer_context['view'] = view
return response
if response.status_code >= 400:
status_msg = "status %s received by user %s attempting to access %s from %s" % \
(response.status_code, request.user, request.path, request.META.get('REMOTE_ADDR', None))
if hasattr(self, '__init_request_error__'):
response = self.handle_exception(self.__init_request_error__)
if response.status_code == 401:
response.data['detail'] += ' To establish a login session, visit /api/login/.'
logger.info(status_msg)
else:
logger.warning(status_msg)
response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
time_started = getattr(self, 'time_started', None)
response['X-API-Product-Version'] = get_awx_version()
response['X-API-Product-Name'] = 'AWX' if isinstance(get_licenser(), StubLicense) else 'Red Hat Ansible Tower'
response['X-API-Node'] = settings.CLUSTER_HOST_ID
if time_started:
time_elapsed = time.time() - self.time_started
response['X-API-Time'] = '%0.3fs' % time_elapsed
if getattr(settings, 'SQL_DEBUG', False):
queries_before = getattr(self, 'queries_before', 0)
q_times = [float(q['time']) for q in connection.queries[queries_before:]]
response['X-API-Query-Count'] = len(q_times)
response['X-API-Query-Time'] = '%0.3fs' % sum(q_times)
if getattr(self, 'deprecated', False):
response['Warning'] = '299 awx "This resource has been deprecated and will be removed in a future release."' # noqa
return response
def get_authenticate_header(self, request):
# HTTP Basic auth is insecure by default, because the basic auth
# backend does not provide CSRF protection.
#
# If you visit `/api/v2/job_templates/` and we return
# `WWW-Authenticate: Basic ...`, your browser will prompt you for an
# HTTP basic auth username+password and will store it _in the browser_
# for subsequent requests. Because basic auth does not require CSRF
# validation (because it's commonly used with e.g., tower-cli and other
# non-browser clients), browsers that save basic auth in this way are
# vulnerable to cross-site request forgery:
#
# 1. Visit `/api/v2/job_templates/` and specify a user+pass for basic auth.
# 2. Visit a nefarious website and submit a
# `<form action='POST' method='https://tower.example.org/api/v2/job_templates/N/launch/'>`
# 3. The browser will use your persisted user+pass and your login
# session is effectively hijacked.
#
# To prevent this, we will _no longer_ send `WWW-Authenticate: Basic ...`
# headers in responses; this means that unauthenticated /api/v2/... requests
# will now return HTTP 401 in-browser, rather than popping up an auth dialog.
#
# This means that people who wish to use the interactive API browser
# must _first_ login in via `/api/login/` to establish a session (which
# _does_ enforce CSRF).
#
# CLI users can _still_ specify basic auth credentials explicitly via
# a header or in the URL e.g.,
# `curl https://user:pass@tower.example.org/api/v2/job_templates/N/launch/`
return 'Bearer realm=api authorization_url=/api/o/authorize/'
def get_description_context(self):
return {
'view': self,
'docstring': type(self).__doc__ or '',
'deprecated': getattr(self, 'deprecated', False),
'swagger_method': getattr(self.request, 'swagger_method', None),
}
@property
def description(self):
template_list = []
for klass in inspect.getmro(type(self)):
template_basename = camelcase_to_underscore(klass.__name__)
template_list.append('api/%s.md' % template_basename)
context = self.get_description_context()
description = render_to_string(template_list, context)
if context.get('deprecated') and context.get('swagger_method') is None:
# render deprecation messages at the very top
description = '\n'.join([render_to_string('api/_deprecated.md', context), description])
return description
def update_raw_data(self, data):
# Remove the parent key if the view is a sublist, since it will be set
# automatically.
parent_key = getattr(self, 'parent_key', None)
if parent_key:
data.pop(parent_key, None)
# Use request data as-is when original request is an update and the
# submitted data was rejected.
request_method = getattr(self, '_raw_data_request_method', None)
response_status = getattr(self, '_raw_data_response_status', 0)
if request_method in ('POST', 'PUT', 'PATCH') and response_status in range(400, 500):
return self.request.data.copy()
return data
def determine_version(self, request, *args, **kwargs):
return (
getattr(request, 'version', None),
getattr(request, 'versioning_scheme', None),
)
def dispatch(self, request, *args, **kwargs):
if self.versioning_class is not None:
scheme = self.versioning_class()
request.version, request.versioning_scheme = (
scheme.determine_version(request, *args, **kwargs),
scheme
)
if 'version' in kwargs:
kwargs.pop('version')
return super(APIView, self).dispatch(request, *args, **kwargs)
def check_permissions(self, request):
if request.method not in ('GET', 'OPTIONS', 'HEAD'):
if 'write' not in getattr(request.user, 'oauth_scopes', ['write']):
raise PermissionDenied()
return super(APIView, self).check_permissions(request)
class GenericAPIView(generics.GenericAPIView, APIView):
# Base class for all model-based views.
# Subclasses should define:
# model = ModelClass
# serializer_class = SerializerClass
def get_serializer(self, *args, **kwargs):
serializer = super(GenericAPIView, self).get_serializer(*args, **kwargs)
# Override when called from browsable API to generate raw data form;
# update serializer "validated" data to be displayed by the raw data
# form.
if hasattr(self, '_raw_data_form_marker'):
# Always remove read only fields from serializer.
for name, field in list(serializer.fields.items()):
if getattr(field, 'read_only', None):
del serializer.fields[name]
serializer._data = self.update_raw_data(serializer.data)
return serializer
def get_queryset(self):
if self.queryset is not None:
return self.queryset._clone()
elif self.model is not None:
qs = self.model._default_manager
if self.model in access_registry:
access_class = access_registry[self.model]
if access_class.select_related:
qs = qs.select_related(*access_class.select_related)
if access_class.prefetch_related:
qs = qs.prefetch_related(*access_class.prefetch_related)
return qs
else:
return super(GenericAPIView, self).get_queryset()
def get_description_context(self):
# Set instance attributes needed to get serializer metadata.
if not hasattr(self, 'request'):
self.request = None
if not hasattr(self, 'format_kwarg'):
self.format_kwarg = 'format'
d = super(GenericAPIView, self).get_description_context()
if hasattr(self.model, "_meta"):
if hasattr(self.model._meta, "verbose_name"):
d.update({
'model_verbose_name': smart_text(self.model._meta.verbose_name),
'model_verbose_name_plural': smart_text(self.model._meta.verbose_name_plural),
})
serializer = self.get_serializer()
metadata = self.metadata_class()
metadata.request = self.request
for method, key in [
('GET', 'serializer_fields'),
('POST', 'serializer_create_fields'),
('PUT', 'serializer_update_fields')
]:
d[key] = metadata.get_serializer_info(serializer, method=method)
d['settings'] = settings
return d
class SimpleListAPIView(generics.ListAPIView, GenericAPIView):
def get_queryset(self):
return self.request.user.get_queryset(self.model)
class ListAPIView(generics.ListAPIView, GenericAPIView):
# Base class for a read-only list view.
def get_queryset(self):
return self.request.user.get_queryset(self.model)
def get_description_context(self):
if 'username' in get_all_field_names(self.model):
order_field = 'username'
else:
order_field = 'name'
d = super(ListAPIView, self).get_description_context()
d.update({
'order_field': order_field,
})
return d
@property
def search_fields(self):
return get_search_fields(self.model)
@property
def related_search_fields(self):
def skip_related_name(name):
return (
name is None or name.endswith('_role') or name.startswith('_') or
name.startswith('deprecated_') or name.endswith('_set') or
name == 'polymorphic_ctype')
fields = set([])
for field in self.model._meta.fields:
if skip_related_name(field.name):
continue
if getattr(field, 'related_model', None):
fields.add('{}__search'.format(field.name))
for related in self.model._meta.related_objects:
name = related.related_name
if isinstance(related, OneToOneRel) and self.model._meta.verbose_name.startswith('unified'):
# Add underscores for polymorphic subclasses for user utility
name = related.related_model._meta.verbose_name.replace(" ", "_")
if skip_related_name(name) or name.endswith('+'):
continue
fields.add('{}__search'.format(name))
m2m_related = []
m2m_related += self.model._meta.local_many_to_many
if issubclass(self.model, UnifiedJobTemplate) and self.model != UnifiedJobTemplate:
m2m_related += UnifiedJobTemplate._meta.local_many_to_many
if issubclass(self.model, UnifiedJob) and self.model != UnifiedJob:
m2m_related += UnifiedJob._meta.local_many_to_many
for relationship in m2m_related:
if skip_related_name(relationship.name):
continue
if relationship.related_model._meta.app_label != 'main':
continue
fields.add('{}__search'.format(relationship.name))
fields = list(fields)
allowed_fields = []
for field in fields:
try:
FieldLookupBackend().get_field_from_lookup(self.model, field)
except PermissionDenied:
pass
except FieldDoesNotExist:
allowed_fields.append(field)
else:
allowed_fields.append(field)
return allowed_fields
class ListCreateAPIView(ListAPIView, generics.ListCreateAPIView):
# Base class for a list view that allows creating new objects.
pass
class ParentMixin(object):
parent_object = None
def get_parent_object(self):
if self.parent_object is not None:
return self.parent_object
parent_filter = {
self.lookup_field: self.kwargs.get(self.lookup_field, None),
}
self.parent_object = get_object_or_404(self.parent_model, **parent_filter)
return self.parent_object
def check_parent_access(self, parent=None):
parent = parent or self.get_parent_object()
parent_access = getattr(self, 'parent_access', 'read')
if parent_access in ('read', 'delete'):
args = (self.parent_model, parent_access, parent)
else:
args = (self.parent_model, parent_access, parent, None)
if not self.request.user.can_access(*args):
raise PermissionDenied()
class SubListAPIView(ParentMixin, ListAPIView):
# Base class for a read-only sublist view.
# Subclasses should define at least:
# model = ModelClass
# serializer_class = SerializerClass
# parent_model = ModelClass
# relationship = 'rel_name_from_parent_to_model'
    # And optionally (the user must have the given access permission on the
    # parent object to view the sublist):
# parent_access = 'read'
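    # A minimal usage sketch (illustrative only; Widget, Note and
    # NoteSerializer are hypothetical names, not part of this module):
    #
    #     class WidgetNotesList(SubListAPIView):
    #         model = Note
    #         serializer_class = NoteSerializer
    #         parent_model = Widget
    #         relationship = 'notes'
    #
    # A GET on such a view returns the intersection of the requesting user's
    # visible Note queryset and parent.notes.all() (see get_queryset below).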
def get_description_context(self):
d = super(SubListAPIView, self).get_description_context()
d.update({
'parent_model_verbose_name': smart_text(self.parent_model._meta.verbose_name),
'parent_model_verbose_name_plural': smart_text(self.parent_model._meta.verbose_name_plural),
})
return d
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
qs = self.request.user.get_queryset(self.model).distinct()
sublist_qs = self.get_sublist_queryset(parent)
return qs & sublist_qs
def get_sublist_queryset(self, parent):
return getattrd(parent, self.relationship).distinct()
class DestroyAPIView(generics.DestroyAPIView):
def has_delete_permission(self, obj):
return self.request.user.can_access(self.model, 'delete', obj)
def perform_destroy(self, instance, check_permission=True):
if check_permission and not self.has_delete_permission(instance):
raise PermissionDenied()
super(DestroyAPIView, self).perform_destroy(instance)
class SubListDestroyAPIView(DestroyAPIView, SubListAPIView):
"""
Concrete view for deleting everything related by `relationship`.
"""
check_sub_obj_permission = True
def destroy(self, request, *args, **kwargs):
instance_list = self.get_queryset()
if (not self.check_sub_obj_permission and
not request.user.can_access(self.parent_model, 'delete', self.get_parent_object())):
raise PermissionDenied()
self.perform_list_destroy(instance_list)
return Response(status=status.HTTP_204_NO_CONTENT)
def perform_list_destroy(self, instance_list):
if self.check_sub_obj_permission:
for instance in instance_list:
if not self.has_delete_permission(instance):
raise PermissionDenied()
for instance in instance_list:
self.perform_destroy(instance, check_permission=False)
class SubListCreateAPIView(SubListAPIView, ListCreateAPIView):
# Base class for a sublist view that allows for creating subobjects
# associated with the parent object.
# In addition to SubListAPIView properties, subclasses may define (if the
# sub_obj requires a foreign key to the parent):
# parent_key = 'field_on_model_referring_to_parent'
def get_description_context(self):
d = super(SubListCreateAPIView, self).get_description_context()
d.update({
'parent_key': getattr(self, 'parent_key', None),
})
return d
def get_queryset(self):
if hasattr(self, 'parent_key'):
# Prefer this filtering because ForeignKey allows us more assumptions
parent = self.get_parent_object()
self.check_parent_access(parent)
qs = self.request.user.get_queryset(self.model)
return qs.filter(**{self.parent_key: parent})
return super(SubListCreateAPIView, self).get_queryset()
def create(self, request, *args, **kwargs):
# If the object ID was not specified, it probably doesn't exist in the
# DB yet. We want to see if we can create it. The URL may choose to
        # inject its primary key into the object because we are posting to a
# subcollection. Use all the normal access control mechanisms.
# Make a copy of the data provided (since it's readonly) in order to
# inject additional data.
if hasattr(request.data, 'copy'):
data = request.data.copy()
else:
data = QueryDict('')
data.update(request.data)
# add the parent key to the post data using the pk from the URL
parent_key = getattr(self, 'parent_key', None)
if parent_key:
data[parent_key] = self.kwargs['pk']
# attempt to deserialize the object
serializer = self.get_serializer(data=data)
if not serializer.is_valid():
return Response(serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
# Verify we have permission to add the object as given.
if not request.user.can_access(self.model, 'add', serializer.validated_data):
raise PermissionDenied()
        # save the object through the serializer, then return the saved
        # object re-serialized in the response
obj = serializer.save()
serializer = self.get_serializer(instance=obj)
headers = {'Location': obj.get_absolute_url(request)}
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
class SubListCreateAttachDetachAPIView(SubListCreateAPIView):
# Base class for a sublist view that allows for creating subobjects and
# attaching/detaching them from the parent.
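    # Request shapes handled by post() below (an illustrative summary of the
    # attach/unattach logic in this class, not new behavior):
    #   POST {"id": 5}                        -> attach existing sub object 5
    #   POST {"name": "...", ...}             -> create a new sub object, then attach it
    #   POST {"id": 5, "disassociate": true}  -> detach object 5 (deleted outright
    #                                            when parent_key is set)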
def is_valid_relation(self, parent, sub, created=False):
return None
def get_description_context(self):
d = super(SubListCreateAttachDetachAPIView, self).get_description_context()
d.update({
"has_attach": True,
})
return d
def attach_validate(self, request):
sub_id = request.data.get('id', None)
res = None
if sub_id and not isinstance(sub_id, int):
data = dict(msg=_('"id" field must be an integer.'))
res = Response(data, status=status.HTTP_400_BAD_REQUEST)
return (sub_id, res)
def attach(self, request, *args, **kwargs):
created = False
parent = self.get_parent_object()
relationship = getattrd(parent, self.relationship)
data = request.data
sub_id, res = self.attach_validate(request)
if res:
return res
# Create the sub object if an ID is not provided.
if not sub_id:
response = self.create(request, *args, **kwargs)
if response.status_code != status.HTTP_201_CREATED:
return response
sub_id = response.data['id']
data = response.data
try:
location = response['Location']
except KeyError:
location = None
created = True
        # Retrieve the sub object (whether created or by ID).
sub = get_object_or_400(self.model, pk=sub_id)
# Verify we have permission to attach.
if not request.user.can_access(self.parent_model, 'attach', parent, sub,
self.relationship, data,
skip_sub_obj_read_check=created):
raise PermissionDenied()
# Verify that the relationship to be added is valid.
attach_errors = self.is_valid_relation(parent, sub, created=created)
if attach_errors is not None:
if created:
sub.delete()
return Response(attach_errors, status=status.HTTP_400_BAD_REQUEST)
# Attach the object to the collection.
if sub not in relationship.all():
relationship.add(sub)
if created:
headers = {}
if location:
headers['Location'] = location
return Response(data, status=status.HTTP_201_CREATED, headers=headers)
else:
return Response(status=status.HTTP_204_NO_CONTENT)
def unattach_validate(self, request):
sub_id = request.data.get('id', None)
res = None
if not sub_id:
data = dict(msg=_('"id" is required to disassociate'))
res = Response(data, status=status.HTTP_400_BAD_REQUEST)
elif not isinstance(sub_id, int):
data = dict(msg=_('"id" field must be an integer.'))
res = Response(data, status=status.HTTP_400_BAD_REQUEST)
return (sub_id, res)
def unattach_by_id(self, request, sub_id):
parent = self.get_parent_object()
parent_key = getattr(self, 'parent_key', None)
relationship = getattrd(parent, self.relationship)
sub = get_object_or_400(self.model, pk=sub_id)
if not request.user.can_access(self.parent_model, 'unattach', parent,
sub, self.relationship, request.data):
raise PermissionDenied()
if parent_key:
sub.delete()
else:
relationship.remove(sub)
return Response(status=status.HTTP_204_NO_CONTENT)
def unattach(self, request, *args, **kwargs):
(sub_id, res) = self.unattach_validate(request)
if res:
return res
return self.unattach_by_id(request, sub_id)
def post(self, request, *args, **kwargs):
if not isinstance(request.data, dict):
return Response('invalid type for post data',
status=status.HTTP_400_BAD_REQUEST)
if 'disassociate' in request.data:
return self.unattach(request, *args, **kwargs)
else:
return self.attach(request, *args, **kwargs)
class SubListAttachDetachAPIView(SubListCreateAttachDetachAPIView):
'''
Derived version of SubListCreateAttachDetachAPIView that prohibits creation
'''
metadata_class = SublistAttachDetatchMetadata
def post(self, request, *args, **kwargs):
sub_id = request.data.get('id', None)
if not sub_id:
return Response(
dict(msg=_("{} 'id' field is missing.".format(
self.model._meta.verbose_name.title()))),
status=status.HTTP_400_BAD_REQUEST)
return super(SubListAttachDetachAPIView, self).post(request, *args, **kwargs)
def update_raw_data(self, data):
request_method = getattr(self, '_raw_data_request_method', None)
response_status = getattr(self, '_raw_data_response_status', 0)
if request_method == 'POST' and response_status in range(400, 500):
return super(SubListAttachDetachAPIView, self).update_raw_data(data)
return {'id': None}
class DeleteLastUnattachLabelMixin(object):
'''
Models for which you want the last instance to be deleted from the database
when the last disassociate is called should inherit from this class. Further,
the model should implement is_detached()
'''
def unattach(self, request, *args, **kwargs):
(sub_id, res) = super(DeleteLastUnattachLabelMixin, self).unattach_validate(request)
if res:
return res
res = super(DeleteLastUnattachLabelMixin, self).unattach_by_id(request, sub_id)
obj = self.model.objects.get(id=sub_id)
if obj.is_detached():
obj.delete()
return res
class SubDetailAPIView(ParentMixin, generics.RetrieveAPIView, GenericAPIView):
pass
class RetrieveAPIView(generics.RetrieveAPIView, GenericAPIView):
pass
class RetrieveUpdateAPIView(RetrieveAPIView, generics.RetrieveUpdateAPIView):
def update(self, request, *args, **kwargs):
self.update_filter(request, *args, **kwargs)
return super(RetrieveUpdateAPIView, self).update(request, *args, **kwargs)
def partial_update(self, request, *args, **kwargs):
self.update_filter(request, *args, **kwargs)
return super(RetrieveUpdateAPIView, self).partial_update(request, *args, **kwargs)
def update_filter(self, request, *args, **kwargs):
''' scrub any fields the user cannot/should not put/patch, based on user context. This runs after read-only serialization filtering '''
pass
class RetrieveDestroyAPIView(RetrieveAPIView, DestroyAPIView):
pass
class RetrieveUpdateDestroyAPIView(RetrieveUpdateAPIView, DestroyAPIView):
pass
class ResourceAccessList(ParentMixin, ListAPIView):
serializer_class = ResourceAccessListElementSerializer
ordering = ('username',)
def get_queryset(self):
obj = self.get_parent_object()
content_type = ContentType.objects.get_for_model(obj)
roles = set(Role.objects.filter(content_type=content_type, object_id=obj.id))
ancestors = set()
for r in roles:
ancestors.update(set(r.ancestors.all()))
return User.objects.filter(roles__in=list(ancestors)).distinct()
def trigger_delayed_deep_copy(*args, **kwargs):
from awx.main.tasks import deep_copy_model_obj
connection.on_commit(lambda: deep_copy_model_obj.delay(*args, **kwargs))
class CopyAPIView(GenericAPIView):
serializer_class = CopySerializer
permission_classes = (AllowAny,)
copy_return_serializer_class = None
new_in_330 = True
new_in_api_v2 = True
def _get_copy_return_serializer(self, *args, **kwargs):
if not self.copy_return_serializer_class:
return self.get_serializer(*args, **kwargs)
serializer_class_store = self.serializer_class
self.serializer_class = self.copy_return_serializer_class
ret = self.get_serializer(*args, **kwargs)
self.serializer_class = serializer_class_store
return ret
@staticmethod
def _decrypt_model_field_if_needed(obj, field_name, field_val):
if field_name in getattr(type(obj), 'REENCRYPTION_BLOCKLIST_AT_COPY', []):
return field_val
if isinstance(obj, Credential) and field_name == 'inputs':
for secret in obj.credential_type.secret_fields:
if secret in field_val:
field_val[secret] = decrypt_field(obj, secret)
elif isinstance(field_val, dict):
for sub_field in field_val:
if isinstance(sub_field, str) \
and isinstance(field_val[sub_field], str):
field_val[sub_field] = decrypt_field(obj, field_name, sub_field)
elif isinstance(field_val, str):
try:
field_val = decrypt_field(obj, field_name)
except AttributeError:
return field_val
return field_val
def _build_create_dict(self, obj):
ret = {}
if self.copy_return_serializer_class:
all_fields = Metadata().get_serializer_info(
self._get_copy_return_serializer(), method='POST'
)
for field_name, field_info in all_fields.items():
if not hasattr(obj, field_name) or field_info.get('read_only', True):
continue
ret[field_name] = CopyAPIView._decrypt_model_field_if_needed(
obj, field_name, getattr(obj, field_name)
)
return ret
@staticmethod
def copy_model_obj(old_parent, new_parent, model, obj, creater, copy_name='', create_kwargs=None):
fields_to_preserve = set(getattr(model, 'FIELDS_TO_PRESERVE_AT_COPY', []))
fields_to_discard = set(getattr(model, 'FIELDS_TO_DISCARD_AT_COPY', []))
m2m_to_preserve = {}
o2m_to_preserve = {}
create_kwargs = create_kwargs or {}
for field_name in fields_to_discard:
create_kwargs.pop(field_name, None)
for field in model._meta.get_fields():
try:
field_val = getattr(obj, field.name)
except AttributeError:
continue
# Adjust copy blocked fields here.
if field.name in fields_to_discard or field.name in [
'id', 'pk', 'polymorphic_ctype', 'unifiedjobtemplate_ptr', 'created_by', 'modified_by'
] or field.name.endswith('_role'):
create_kwargs.pop(field.name, None)
continue
if field.one_to_many:
if field.name in fields_to_preserve:
o2m_to_preserve[field.name] = field_val
elif field.many_to_many:
if field.name in fields_to_preserve and not old_parent:
m2m_to_preserve[field.name] = field_val
elif field.many_to_one and not field_val:
create_kwargs.pop(field.name, None)
elif field.many_to_one and field_val == old_parent:
create_kwargs[field.name] = new_parent
elif field.name == 'name' and not old_parent:
create_kwargs[field.name] = copy_name or field_val + ' copy'
elif field.name in fields_to_preserve:
create_kwargs[field.name] = CopyAPIView._decrypt_model_field_if_needed(
obj, field.name, field_val
)
# WorkflowJobTemplateNodes that represent an approval are *special*;
# when we copy them, we actually want to *copy* the UJT they point at
# rather than share the template reference between nodes in disparate
# workflows
if (
isinstance(obj, WorkflowJobTemplateNode) and
isinstance(getattr(obj, 'unified_job_template'), WorkflowApprovalTemplate)
):
new_approval_template, sub_objs = CopyAPIView.copy_model_obj(
None, None, WorkflowApprovalTemplate,
obj.unified_job_template, creater
)
create_kwargs['unified_job_template'] = new_approval_template
new_obj = model.objects.create(**create_kwargs)
logger.debug('Deep copy: Created new object {}({})'.format(
new_obj, model
))
        # Need to save separately because django-crum's get_current_user would
        # not work properly in a non-request-response-cycle context.
new_obj.created_by = creater
new_obj.save()
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
for m2m in m2m_to_preserve:
for related_obj in m2m_to_preserve[m2m].all():
getattr(new_obj, m2m).add(related_obj)
if not old_parent:
sub_objects = []
for o2m in o2m_to_preserve:
for sub_obj in o2m_to_preserve[o2m].all():
sub_model = type(sub_obj)
sub_objects.append((sub_model.__module__, sub_model.__name__, sub_obj.pk))
return new_obj, sub_objects
ret = {obj: new_obj}
for o2m in o2m_to_preserve:
for sub_obj in o2m_to_preserve[o2m].all():
ret.update(CopyAPIView.copy_model_obj(obj, new_obj, type(sub_obj), sub_obj, creater))
return ret
def get(self, request, *args, **kwargs):
obj = self.get_object()
if not request.user.can_access(obj.__class__, 'read', obj):
raise PermissionDenied()
create_kwargs = self._build_create_dict(obj)
for key in create_kwargs:
create_kwargs[key] = getattr(create_kwargs[key], 'pk', None) or create_kwargs[key]
try:
can_copy = request.user.can_access(self.model, 'add', create_kwargs) and \
request.user.can_access(self.model, 'copy_related', obj)
except PermissionDenied:
return Response({'can_copy': False})
return Response({'can_copy': can_copy})
def post(self, request, *args, **kwargs):
obj = self.get_object()
create_kwargs = self._build_create_dict(obj)
create_kwargs_check = {}
for key in create_kwargs:
create_kwargs_check[key] = getattr(create_kwargs[key], 'pk', None) or create_kwargs[key]
if not request.user.can_access(self.model, 'add', create_kwargs_check):
raise PermissionDenied()
if not request.user.can_access(self.model, 'copy_related', obj):
raise PermissionDenied()
serializer = self.get_serializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
new_obj, sub_objs = CopyAPIView.copy_model_obj(
None, None, self.model, obj, request.user, create_kwargs=create_kwargs,
copy_name=serializer.validated_data.get('name', '')
)
if hasattr(new_obj, 'admin_role') and request.user not in new_obj.admin_role.members.all():
new_obj.admin_role.members.add(request.user)
if sub_objs:
# store the copied object dict into cache, because it's
# often too large for postgres' notification bus
# (which has a default maximum message size of 8k)
key = 'deep-copy-{}'.format(str(uuid.uuid4()))
cache.set(key, sub_objs, timeout=3600)
permission_check_func = None
if hasattr(type(self), 'deep_copy_permission_check_func'):
permission_check_func = (
type(self).__module__, type(self).__name__, 'deep_copy_permission_check_func'
)
trigger_delayed_deep_copy(
self.model.__module__, self.model.__name__,
obj.pk, new_obj.pk, request.user.pk, key,
permission_check_func=permission_check_func
)
serializer = self._get_copy_return_serializer(new_obj)
headers = {'Location': new_obj.get_absolute_url(request=request)}
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
class BaseUsersList(SubListCreateAttachDetachAPIView):
def post(self, request, *args, **kwargs):
ret = super(BaseUsersList, self).post( request, *args, **kwargs)
if ret.status_code != 201:
return ret
try:
if ret.data is not None and request.data.get('is_system_auditor', False):
# This is a faux-field that just maps to checking the system
                # auditor role member list... unfortunately this means we can't
                # set it on creation, and it thus needs to be set here.
user = User.objects.get(id=ret.data['id'])
user.is_system_auditor = request.data['is_system_auditor']
ret.data['is_system_auditor'] = request.data['is_system_auditor']
except AttributeError as exc:
print(exc)
pass
return ret
| GoogleCloudPlatform/sap-deployment-automation | third_party/github.com/ansible/awx/awx/api/generics.py | Python | apache-2.0 | 43,854 | ["VisIt"] | 99576a4541001357b74b26e6a314eff884f00793c580238a4af33216b01a6c40 |
# -*- coding: utf-8 -*-
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham, A. Roitman
# Copyright (C) 2007-2009 B. Malengier
# Copyright (C) 2008 Lukasz Rymarczyk
# Copyright (C) 2008 Raphael Ackermann
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2012 Doug Blank
# Copyright (C) 2012-2013 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Module responsible for handling the command line arguments for Gramps.
"""
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
import sys
import getopt
import logging
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from gprime.const import LONGOPTS, SHORTOPTS, PLUGINS_DIR
from gprime.plug import BasePluginManager
from gprime.config import config
from gprime.utils.cast import get_type_converter
from gprime.const import LOCALE as glocale
_ = glocale.translation.gettext
_HELP = _("""
Usage: gramps.py [OPTION...]
--load-modules=MODULE1,MODULE2,... Dynamic modules to load
Help options
-?, --help Show this help message
--usage Display brief usage message
Application options
-O, --open=FAMILY_TREE Open Family Tree
-C, --create=FAMILY_TREE Create on open if new Family Tree
-i, --import=FILENAME Import file
-e, --export=FILENAME Export file
-r, --remove=FAMILY_TREE_PATTERN Remove matching Family Tree(s) (use regular expressions)
-f, --format=FORMAT Specify Family Tree format
-a, --action=ACTION Specify action
-p, --options=OPTIONS_STRING Specify options
-d, --debug=LOGGER_NAME Enable debug logs
-l [FAMILY_TREE_PATTERN...] List Family Trees
-L [FAMILY_TREE_PATTERN...] List Family Trees in Detail
-t [FAMILY_TREE_PATTERN...] List Family Trees, tab delimited
-u, --force-unlock Force unlock of Family Tree
-s, --show Show config settings
-c, --config=[config.setting[:value]] Set config setting(s) and start Gramps
-y, --yes Don't ask to confirm dangerous actions (non-GUI mode only)
-q, --quiet Suppress progress indication output (non-GUI mode only)
-v, --version Show versions
-b, --databases Show available database backends
""")
_USAGE = _("""
Example of usage of Gramps command line interface
1. To import four databases (whose formats can be determined from their names)
and then check the resulting database for errors, one may type:
gramps -i file1.ged -i file2.gpkg -i ~/db3.gramps -i file4.wft -a tool -p name=check.
2. To explicitly specify the formats in the above example, append filenames with appropriate -f options:
gramps -i file1.ged -f gedcom -i file2.gpkg -f gramps-pkg -i ~/db3.gramps -f gramps-xml -i file4.wft -f wft -a tool -p name=check.
3. To record the database resulting from all imports, supply -e flag
(use -f if the filename does not allow Gramps to guess the format):
gramps -i file1.ged -i file2.gpkg -e ~/new-package -f gramps-pkg
4. To save any error messages of the above example into files outfile and errfile, run:
gramps -i file1.ged -i file2.dpkg -e ~/new-package -f gramps-pkg >outfile 2>errfile
5. To import three databases and start interactive Gramps session with the result:
gramps -i file1.ged -i file2.gpkg -i ~/db3.gramps
6. To open a database and, based on that data, generate timeline report in PDF format
putting the output into the my_timeline.pdf file:
gramps -O 'Family Tree 1' -a report -p name=timeline,off=pdf,of=my_timeline.pdf
7. To generate a summary of a database:
gramps -O 'Family Tree 1' -a report -p name=summary
8. Listing report options
Use the name=timeline,show=all to find out about all available options for the timeline report.
To find out details of a particular option, use show=option_name , e.g. name=timeline,show=off string.
To learn about available report names, use name=show string.
9. To convert a Family Tree on the fly to a .gramps xml file:
gramps -O 'Family Tree 1' -e output.gramps -f gramps-xml
10. To generate a web site into another locale (in German):
LANGUAGE=de_DE; LANG=de_DE.UTF-8 gramps -O 'Family Tree 1' -a report -p name=navwebpage,target=/../de
11. Finally, to start normal interactive session type:
gramps
Note: These examples are for bash shell.
Syntax may be different for other shells and for Windows.
""")
#-------------------------------------------------------------------------
# ArgParser
#-------------------------------------------------------------------------
class ArgParser:
"""
This class is responsible for parsing the command line arguments (if any)
given to gramps, and determining if a GUI or a CLI session must be started.
A filename and/or options may be specified as arguments.
The valid options are:
-O, --open=FAMILY_TREE Open Family Tree
-C, --create=FAMILY_TREE Create on open if new Family Tree
-i, --import=FILENAME Import file
-e, --export=FILENAME Export file
-r, --remove=PATTERN Remove matching Family Tree(s)
-f, --format=FORMAT Specify Family Tree format
-a, --action=ACTION Specify action
-p, --options=OPTIONS_STRING Specify options
-d, --debug=LOGGER_NAME Enable debug logs
-l [FAMILY_TREE...] List Family Trees
-L [FAMILY_TREE...] List Family Trees in Detail
-t [FAMILY_TREE...] List Family Trees, tab delimited
-u, --force-unlock Force unlock of Family Tree
-s, --show Show config settings
-c, --config=SETTINGS Set config setting(s) and start Gramps
-y, --yes Don't ask to confirm dangerous actions
-q, --quiet Suppress progress indication output
-v, --version Show versions
-b, --databases Show available database backends
-h, --help Display the help
--usage Display usage information
If the filename (no options) is specified, the interactive session is
launched using data from filename. In this mode (filename, no options), the
rest of the arguments are ignored. This is a mode suitable by default for
GUI launchers, mime type handlers, and the like.
If no filename or -i option is given, a new interactive session (empty
database) is launched, since no data is given anyway.
If -O or -i option is given, but no -e or -a options are given, an
interactive session is launched with the ``FILENAME`` (specified with -i).
If both input (-O or -i) and processing (-e or -a) options are given,
interactive session will not be launched.
When using import or export options (-i or -e), the -f option may be
specified to indicate the family tree format.
Possible values for ``ACTION`` are: 'report', 'book' and 'tool'.
Configuration ``SETTINGS`` may be specified using the -c option. The
settings are of the form config.setting[:value]. If used without a value,
the setting is shown.
If the -y option is given, the user's acceptance of any CLI prompt is
assumed. (see :meth:`.cli.user.User.prompt`)
If the -q option is given, extra noise on sys.stderr, such as progress
indicators, is suppressed.
"""
def __init__(self, args):
"""
Pass the command line arguments on creation.
"""
self.args = args
self.open_gui = None
self.open = None
self.exports = []
self.actions = []
self.imports = []
self.removes = []
self.imp_db_path = None
self.list = False
self.list_more = False
self.list_table = False
self.database_names = None
self.help = False
self.usage = False
self.force_unlock = False
self.create = None
self.quiet = False
self.auto_accept = False
self.errors = []
self.parse_args()
#-------------------------------------------------------------------------
# Argument parser: sorts out given arguments
#-------------------------------------------------------------------------
def parse_args(self):
"""
Fill in lists with open, exports, imports, and actions options.
Any errors are added to self.errors
"""
try:
options, leftargs = getopt.getopt(self.args[1:],
SHORTOPTS, LONGOPTS)
except getopt.GetoptError as msg:
# Extract the arguments in the list.
# The % operator replaces the list elements
# with repr() of the list elements
# which is OK for latin characters,
            # but not for non-latin characters in list elements
cliargs = "[ "
for arg in range(len(self.args) - 1):
cliargs += self.args[arg + 1] + " "
cliargs += "]"
# Must first do str() of the msg object.
msg = str(msg)
self.errors += [(_('Error parsing the arguments'),
msg + '\n' +
_("Error parsing the arguments: %s \n"
"Type gramps --help for an overview of "
"commands, or read the manual pages."
) % cliargs)]
return
# Some args can work on a list of databases:
if leftargs:
for opt_ix in range(len(options)):
option, value = options[opt_ix]
if option in ['-L', '-l', '-t']:
self.database_names = leftargs
leftargs = []
if leftargs:
            # if there was an argument without an option,
            # use it as a file to open and return
self.open_gui = leftargs[0]
print(_("Trying to open: %s ..."
) % leftargs[0],
file=sys.stderr)
#see if force open is on
for opt_ix in range(len(options)):
option, value = options[opt_ix]
if option in ('-u', '--force-unlock'):
self.force_unlock = True
break
return
        # Go over all given options and place them into the appropriate lists
cleandbg = []
need_to_quit = False
for opt_ix in range(len(options)):
option, value = options[opt_ix]
if option in ['-O', '--open']:
self.open = value
elif option in ['-C', '--create']:
self.create = value
elif option in ['-i', '--import']:
family_tree_format = None
if (opt_ix < len(options) - 1
and options[opt_ix + 1][0] in ('-f', '--format')):
family_tree_format = options[opt_ix + 1][1]
self.imports.append((value, family_tree_format))
elif option in ['-r', '--remove']:
self.removes.append(value)
elif option in ['-e', '--export']:
family_tree_format = None
if (opt_ix < len(options) - 1
and options[opt_ix + 1][0] in ('-f', '--format')):
family_tree_format = options[opt_ix + 1][1]
self.exports.append((value, family_tree_format))
elif option in ['-a', '--action']:
action = value
if action not in ('report', 'tool', 'book'):
print(_("Unknown action: %s. Ignoring."
) % action,
file=sys.stderr)
continue
options_str = ""
if (opt_ix < len(options)-1
and options[opt_ix+1][0] in ('-p', '--options')):
options_str = options[opt_ix+1][1]
self.actions.append((action, options_str))
elif option in ['-d', '--debug']:
print(_('setup debugging'), value, file=sys.stderr)
logger = logging.getLogger(value)
logger.setLevel(logging.DEBUG)
cleandbg += [opt_ix]
elif option in ['-l']:
self.list = True
elif option in ['-L']:
self.list_more = True
elif option in ['-t']:
self.list_table = True
elif option in ['-s', '--show']:
print(_("Gramps config settings from %s:"
) % config.filename)
for sect in config.data:
for setting in config.data[sect]:
print("%s.%s=%s" % (sect, setting,
repr(config.data[sect][setting])))
print()
sys.exit(0)
elif option in ['-b', '--databases']:
default = config.data["database"]["backend"]
pmgr = BasePluginManager.get_instance()
pmgr.reg_plugins(PLUGINS_DIR, self, None)
for plugin in pmgr.get_reg_databases():
pdata = pmgr.get_plugin(plugin.id)
mod = pmgr.load_plugin(pdata)
if mod:
database = getattr(mod, pdata.databaseclass)
summary = database.get_class_summary()
print("Database backend ID:",
pdata.id,
"(default)" if pdata.id == default else "")
for key in sorted(summary.keys()):
print(" ", _("%s:") % key, summary[key])
sys.exit(0)
elif option in ['-c', '--config']:
cfg_name = value
set_value = False
if cfg_name:
if ":" in cfg_name:
cfg_name, new_value = cfg_name.split(":", 1)
set_value = True
if config.has_default(cfg_name):
setting_value = config.get(cfg_name)
print(_("Current Gramps config setting: "
"%(name)s:%(value)s"
) % {'name' : cfg_name,
'value' : repr(setting_value)},
file=sys.stderr)
if set_value:
# does a user want the default config value?
if new_value in ("DEFAULT", _("DEFAULT")):
new_value = config.get_default(cfg_name)
else:
converter = get_type_converter(setting_value)
new_value = converter(new_value)
config.set(cfg_name, new_value)
# translators: indent "New" to match "Current"
print(_(" New Gramps config setting: "
"%(name)s:%(value)s"
) % {'name' : cfg_name,
'value' : repr(config.get(cfg_name))},
file=sys.stderr)
else:
need_to_quit = True
else:
print(_("Gramps: no such config setting: '%s'"
) % cfg_name,
file=sys.stderr)
need_to_quit = True
cleandbg += [opt_ix]
elif option in ['-h', '-?', '--help']:
self.help = True
elif option in ['-u', '--force-unlock']:
self.force_unlock = True
elif option in ['--usage']:
self.usage = True
elif option in ['-y', '--yes']:
self.auto_accept = True
elif option in ['-q', '--quiet']:
self.quiet = True
#clean options list
cleandbg.reverse()
for ind in cleandbg:
del options[ind]
if (len(options) > 0
and self.open is None
and self.imports == []
and self.removes == []
and not (self.list
or self.list_more
or self.list_table
or self.help)):
# Extract and convert to unicode the arguments in the list.
# The % operator replaces the list elements with repr() of
# the list elements, which is OK for latin characters
# but not for non-latin characters in list elements
cliargs = "[ "
for arg in range(len(self.args) - 1):
cliargs += self.args[arg + 1] + ' '
cliargs += "]"
self.errors += [(_('Error parsing the arguments'),
_("Error parsing the arguments: %s \n"
"To use in the command-line mode, supply at "
"least one input file to process."
) % cliargs)]
if need_to_quit:
sys.exit(0)
#-------------------------------------------------------------------------
# Determine the need for GUI
#-------------------------------------------------------------------------
def need_gui(self):
"""
Determine whether we need a GUI session for the given tasks.
"""
if self.errors:
#errors in argument parsing ==> give cli error, no gui needed
return False
if len(self.removes) > 0:
return False
if self.list or self.list_more or self.list_table or self.help:
return False
if self.open_gui:
# No-option argument, definitely GUI
return True
# If we have data to work with:
if self.open or self.imports:
if self.exports or self.actions:
# have both data and what to do with it => no GUI
return False
elif self.create:
if self.open: # create an empty DB, open a GUI to fill it
return True
else: # create a DB, then do the import, with no GUI
self.open = self.create
return False
else:
# data given, but no action/export => GUI
return True
# No data, can only do GUI here
return True
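    # Illustrative decision table for need_gui() above (a sketch derived from
    # the branches in that method, not an exhaustive list):
    #   gramps                                  -> True  (no data given, GUI)
    #   gramps mytree.gramps                    -> True  (bare filename, open_gui)
    #   gramps -i a.ged                         -> True  (data but nothing to do with it)
    #   gramps -O 'Tree' -a report -p name=...  -> False (data plus an action)
    #   gramps -C 'New' -i a.ged                -> False (create, then import, no GUI)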
def print_help(self):
"""
        If the user gives the --help or -h option, print the help message to the terminal.
"""
if self.help:
print(_HELP)
sys.exit(0)
def print_usage(self):
"""
        If the user gives the --usage option, print the usage message to the terminal.
"""
if self.usage:
print(_USAGE)
sys.exit(0)
| sam-m888/gprime | gprime/cli/argparser.py | Python | gpl-2.0 | 20,299 | ["Brian"] | 27e2e532052d6b88ef0533b6092f97706a678172f333a3c422193ebbef8ffa08 |
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd
import espressomd.cuda_init
import espressomd.electrostatics
from tests_common import abspath
@utx.skipIfMissingFeatures("ELECTROSTATICS")
class CoulombCloudWall(ut.TestCase):
"""This compares p3m, p3m_gpu electrostatic forces and energy against
stored data."""
S = espressomd.System(box_l=[1.0, 1.0, 1.0])
data = np.genfromtxt(
abspath("data/coulomb_cloud_wall_duplicated_system.data"))
tolerance = 1E-3
# Reference energy from p3m in the tcl test case
reference_energy = 2. * 148.94229549
def setUp(self):
self.S.box_l = (10, 10, 20)
self.S.time_step = 0.01
self.S.cell_system.skin = 0.4
# Add particles to system and store reference forces in hash
# Input format: id pos q f
self.S.part.add(pos=self.data[:, 1:4], q=self.data[:, 4])
self.forces = self.data[:, 5:8]
def tearDown(self):
self.S.part.clear()
self.S.actors.clear()
def compare(self, method_name, energy=True):
# Compare forces and energy now in the system to stored ones
# Force
force_diff = np.linalg.norm(self.S.part[:].f - self.forces, axis=1)
self.assertLess(
np.mean(force_diff), self.tolerance,
msg="Absolute force difference too large for method " + method_name)
# Energy
if energy:
self.assertAlmostEqual(
self.S.analysis.energy()["total"], self.reference_energy,
delta=self.tolerance,
msg="Absolute energy difference too large for " + method_name)
# Tests for individual methods
@utx.skipIfMissingFeatures("P3M")
def test_p3m(self):
self.S.actors.add(
espressomd.electrostatics.P3M(
prefactor=1, r_cut=1.001, accuracy=1e-3,
mesh=[64, 64, 128], cao=7, alpha=2.70746, tune=False))
self.S.integrator.run(0)
self.compare("p3m", energy=True)
@utx.skipIfMissingGPU()
def test_p3m_gpu(self):
self.S.actors.add(
espressomd.electrostatics.P3MGPU(
prefactor=1,
r_cut=1.001,
accuracy=1e-3,
mesh=[64, 64, 128],
cao=7,
alpha=2.70746,
tune=False))
self.S.integrator.run(0)
self.compare("p3m_gpu", energy=False)
def test_zz_deactivation(self):
# The energy is 0 if no method is active
self.assertEqual(self.S.analysis.energy()["total"], 0.0)
if __name__ == "__main__":
ut.main()
| fweik/espresso | testsuite/python/coulomb_cloud_wall_duplicated.py | Python | gpl-3.0 | 3,375 | ["ESPResSo"] | 41c89e5b4906dbd3f331852a8be5f5cdeab6ed5fb7bbbdcadaf14cc4a4bceccf |
#!/usr/local/bin/python -i
# preceding line should have the path for Python on your machine
# vizplotgui_vmd.py
# Purpose: viz running LAMMPS simulation via VMD with plot and GUI
# Syntax: vizplotgui_vmd.py in.lammps Nfreq compute-ID
# in.lammps = LAMMPS input script
# Nfreq = plot data point and viz snapshot every this many steps
# compute-ID = ID of compute that calculates temperature
# (or any other scalar quantity)
# IMPORTANT: this script cannot yet be run in parallel via Pypar,
# because I can't seem to do an MPI-style broadcast in Pypar
import sys,time
sys.path.append("./pizza")
# methods called by GUI
def run():
global runflag
runflag = 1
def stop():
global runflag
runflag = 0
def settemp(value):
global temptarget
temptarget = slider.get()
def quit():
global breakflag
breakflag = 1
# method called by timestep loop every Nfreq steps
# read dump snapshot and viz it, update plot with compute value
def update(ntimestep):
d.next()
d.unscale()
p.single(ntimestep)
v.append('tmp.pdb','pdb')
value = lmp.extract_compute(compute,0,0)
xaxis.append(ntimestep)
yaxis.append(value)
gn.plot(xaxis,yaxis)
# parse command line
argv = sys.argv
if len(argv) != 4:
print "Syntax: vizplotgui_vmd.py in.lammps Nfreq compute-ID"
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
compute = sys.argv[3]
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
# dump a file in native LAMMPS dump format for Pizza.py dump tool
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
lmp.command("dump python all atom %d tmp.dump" % nfreq)
# initial 0-step run to generate initial 1-point plot, dump file, and image
lmp.command("run 0 pre yes post no")
value = lmp.extract_compute(compute,0,0)
ntimestep = 0
xaxis = [ntimestep]
yaxis = [value]
breakflag = 0
runflag = 0
temptarget = 1.0
# wrapper on VMD window via Pizza.py vmd tool
# just proc 0 handles reading of dump file and viz
if me == 0:
from vmd import vmd
v = vmd()
v('menu main off')
v.rep('VDW')
from dump import dump
from pdbfile import pdbfile
d = dump('tmp.dump',0)
p = pdbfile(d)
d.next()
d.unscale()
p.single(ntimestep)
v.new('tmp.pdb','pdb')
# display GUI with run/stop buttons and slider for temperature
if me == 0:
from Tkinter import *
tkroot = Tk()
tkroot.withdraw()
root = Toplevel(tkroot)
root.title("LAMMPS GUI")
frame = Frame(root)
Button(frame,text="Run",command=run).pack(side=LEFT)
Button(frame,text="Stop",command=stop).pack(side=LEFT)
slider = Scale(frame,from_=0.0,to=5.0,resolution=0.1,
orient=HORIZONTAL,label="Temperature")
slider.bind('<ButtonRelease-1>',settemp)
slider.set(temptarget)
slider.pack(side=LEFT)
Button(frame,text="Quit",command=quit).pack(side=RIGHT)
frame.pack()
tkroot.update()
# wrapper on GnuPlot via Pizza.py gnu tool
if me == 0:
from gnu import gnu
gn = gnu()
gn.plot(xaxis,yaxis)
gn.title(compute,"Timestep","Temperature")
# endless loop, checking status of GUI settings every Nfreq steps
# run with pre yes/no and post yes/no depending on go/stop status
# re-invoke fix langevin with new seed when temperature slider changes
# after re-invoke of fix langevin, run with pre yes
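# Illustrative summary of the pre/post combinations used in the loop below
# (a restatement of the existing logic, not new behavior):
#   stopped -> running : "run N pre yes post no"  (full re-init, defer output)
#   running -> running : "run N pre no post no"   (skip both, fastest path)
#   running -> stopped : "run N pre no post yes"  (finalize timing/statistics)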
running = 0
temp = temptarget
seed = 12345
lmp.command("fix 2 all langevin %g %g 0.1 %d" % (temp,temp,seed))
while 1:
if me == 0: tkroot.update()
if temp != temptarget:
temp = temptarget
seed += me+1
lmp.command("fix 2 all langevin %g %g 0.1 12345" % (temp,temp))
running = 0
if runflag and running:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
elif runflag and not running:
lmp.command("run %d pre yes post no" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
elif not runflag and running:
lmp.command("run %d pre no post yes" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
if breakflag: break
if runflag: running = 1
else: running = 0
time.sleep(0.01)
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
| val-github/lammps-dev | python/examples/vizplotgui_vmd.py | Python | gpl-2.0 | 4,392 | ["LAMMPS", "VMD"] | 7a291fcb85727dd3d51e04ae106e5d5d28a4a7e940206a554ae2186da7ba82ea |
#!/usr/bin/env python3
# Computes exact similarity solution "test Q" at an initial time and after
# a specified runtime. Suggests a PISM run for this runtime. The exact
# solution for velocity and for thickness can be compared to PISM output.
#
# For the similarity solution see equation (3.19) in
# [\ref PeglerListerWorster2012] = PLW2012:
# S. Pegler, J. Lister, and M. G. Worster, 2012. Release of a viscous
# power-law fluid over an inviscid ocean", J. Fluid Mech. 700, 63--76.
# notes:
# don't use -pik because it will do -kill_icebergs and the whole thing is an iceberg
# could use -ssa_view_nuh if desired
# FIXME: need to carefully set hardness
from pylab import *
import sys
import time
# try different netCDF modules
try:
from netCDF4 import Dataset as CDF
except:
print("netCDF4 is not installed!")
sys.exit(1)
from optparse import OptionParser
parser = OptionParser()
parser.usage = \
"""%prog MX DURATION
where MX is number of grid points,
DURATION is time in years for run,
Example: Try this diagnostic-only run:
$ export MX=101 YY=0
$ ./exactQ.py $MX $YY
$ pismr -o outQ$MX.nc -y $YY -i initQ$MX.nc -bootstrap -Mx $MX -My $MX -Mz 21 -Lz 1500 -z_spacing equal -surface given -stress_balance ssa -energy none -yield_stress constant -tauc 1e6 -ssa_dirichlet_bc -cfbc -part_grid -o_order zyx -ssa_e 1.0 -ssa_flow_law isothermal_glen
"""
parser.description = "A script which runs Test Q."
(options, args) = parser.parse_args()
if (len(args) < 2) | (len(args) > 2):
print("ERROR; exactQ.py needs two arguments; run with --help to see usage")
print("... EXITING")
exit(-1)
SperA = 31556926.0
Mx = int(args[0])
My = Mx
runtime = float(args[1]) * SperA
ncfile = "initQ%d.nc" % Mx
# basic parameters
g = 9.81
rho = 910.0 # density of ice; kg/m^3
rhow = 1028.0 # density of ocean water; kg/m^3
n = 3.0
barB = 1.9e8 # strength of shelf; Pa s^(1/3); from MacAyeal et al 1996;
# FIXME is this equal to \tilde mu or not?
# derived parameters
m = (1.0 / n) - 1.0
gprime = (rhow - rho) * g / rhow # see just after (2.1) in PLW2012
nurescale = 3.0 ** (m / 2.0) * barB / rho # see just after (3.19) in PLW2012
C0 = 12.0 * nurescale / gprime # constant in (3.19) in PLW2012
def timeQ(H0):
# invert the first formula in (3.19) in PLW2012 to get t = f(H0)
t = (1.0 / (2.0 * n)) * (C0 / H0) ** n
return t
def geomvelQ(t, r, V):
# computes (3.19) in PLW2012, assuming t,V are scalar and r is array
# returns H,u with same size as r, but R is scalar
tnt = 2.0 * n * t
H = C0 * tnt ** (-1.0 / n) * ones(shape(r))
u = r / tnt
R = (V / (C0 * pi)) ** 0.5 * tnt ** (1.0 / (2.0 * n))
return H, u, R
# similarity solution: choose dimensions, get told the time and volume
H0 = 1000.0 # m
R0 = 100.0e3 # m; 100 km
V = pi * R0 ** 2 * H0
t0 = timeQ(H0)
t = t0 + runtime
print('exact Test Q has the following parameters for the start time t=t0:')
print(' time t0 = %.3e s = %f a' % (t0, t0 / SperA))
print(' thickness H0 = %.3f m' % H0)
print(' radius R0 = %.3f km' % (R0 / 1.0e3))
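# Quick self-consistency check of the similarity solution (illustrative only;
# it does not affect the bootstrap file written below). Evaluating (3.19) at
# t0 = timeQ(H0) should recover H0, and with V = pi*R0**2*H0 it should also
# recover R0.
H_check, _, R_check = geomvelQ(t0, array([0.0]), V)
print('consistency check: H(t0) = %.3f m (expect %.3f), R(t0) = %.3f km (expect %.3f)'
      % (H_check[0], H0, R_check / 1.0e3, R0 / 1.0e3))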
print('building PISM bootstrap file %s with Mx = %d and My = %d grid points ...' % (ncfile, Mx, My))
# set up the grid:
Lx = 200.0e3
Ly = Lx
x = linspace(-Lx, Lx, Mx)
y = linspace(-Ly, Ly, My)
[xx, yy] = meshgrid(x, y) # if there were "ndgrid" in numpy?
rr = sqrt(xx ** 2 + yy ** 2)
fill_value = nan
# create initial fields
topg = zeros((Mx, My)) - 1200.0 # so it is floating
ice_surface_temp = zeros((Mx, My)) + 263.15 # -10 degrees Celsius; just a guess
climatic_mass_balance = zeros((Mx, My))
thk = zeros((Mx, My))
thk[rr <= R0] = H0
zerossabc = zeros((Mx, My))
# create exact solution fields
thk_exact, c_exact, R_exact = geomvelQ(t, rr, V)
thk_exact[rr > R_exact] = 0.0
c_exact *= SperA
c_exact[rr > R_exact] = 0.0
print('exact Test Q at time t=%f years is in these variables:' % (t / SperA))
print(' c_exact, with max = %.3e' % c_exact.max())
print(' thk_exact, with max = %.3e' % thk_exact.max())
print('and R_exact = %.3f km' % (R_exact / 1.0e3))
# Write the data:
nc = CDF(ncfile, "w", format='NETCDF3_CLASSIC') # for netCDF4 module
# Create dimensions x and y
nc.createDimension("x", size=Mx)
nc.createDimension("y", size=My)
x_var = nc.createVariable("x", 'f4', dimensions=("x",))
x_var.units = "m"
x_var.long_name = "easting"
x_var.standard_name = "projection_x_coordinate"
x_var[:] = x
y_var = nc.createVariable("y", 'f4', dimensions=("y",))
y_var.units = "m"
y_var.long_name = "northing"
y_var.standard_name = "projection_y_coordinate"
y_var[:] = y
def def_var(nc, name, units, fillvalue):
# dimension transpose is standard: "float thk(y, x)" in NetCDF file
var = nc.createVariable(name, 'f', dimensions=("y", "x"), fill_value=fillvalue)
var.units = units
return var
bed_var = def_var(nc, "topg", "m", fill_value)
bed_var.standard_name = "bedrock_altitude"
bed_var[:] = topg
thk_var = def_var(nc, "thk", "m", fill_value)
thk_var.standard_name = "land_ice_thickness"
thk_var[:] = thk
climatic_mass_balance_var = def_var(nc, "climatic_mass_balance", "kg m-2 s-1", fill_value)
climatic_mass_balance_var.standard_name = "land_ice_surface_specific_mass_balance"
climatic_mass_balance_var[:] = climatic_mass_balance
ice_surface_temp_var = def_var(nc, "ice_surface_temp", "K", fill_value)
ice_surface_temp_var[:] = ice_surface_temp
u_bc_var = def_var(nc, "u_bc", "m s-1", fill_value)
u_bc_var[:] = zerossabc.copy()
v_bc_var = def_var(nc, "v_bc", "m s-1", fill_value)
v_bc_var[:] = zerossabc.copy()
vel_bc_mask_var = nc.createVariable("vel_bc_mask", "i", dimensions=("y", "x"))
vel_bc_mask_var[:] = ((xx == 0.0) & (yy == 0.0))
thk_exact_var = def_var(nc, "thk_exact", "m", fill_value)
thk_exact_var[:] = thk_exact
c_exact_var = def_var(nc, "c_exact", "m year-1", fill_value)
c_exact_var[:] = c_exact
# set global attributes
nc.Conventions = 'CF-1.4'
historysep = ' '
historystr = time.asctime() + ': ' + historysep.join(sys.argv) + '\n'
setattr(nc, 'history', historystr)
nc.close()
print('file %s written ...' % ncfile)
print(' ... now run FIXME')
| pism/pism | src/verification/tests/exactQ.py | Python | gpl-3.0 | 6,138 | ["NetCDF"] | 7a104b5220a6e3ccc9d2773417727997279917ba73a8022fb15f1039519a84cb |
# -*- coding: utf-8 -*-
import datetime
import hashlib
import json
import os
import tarfile
from copy import deepcopy
from tempfile import mkdtemp
from django.conf import settings
from django.core import mail
from django.core.management import call_command
from django.core.urlresolvers import reverse
import mock
from nose.tools import eq_, ok_
from requests.exceptions import RequestException
import mkt
import mkt.site.tests
from mkt.developers.models import ActivityLog
from mkt.files.models import File, FileUpload
from mkt.reviewers.models import RereviewQueue
from mkt.site.fixtures import fixture
from mkt.site.helpers import absolutify
from mkt.site.storage_utils import private_storage, public_storage
from mkt.site.utils import app_factory
from mkt.users.models import UserProfile
from mkt.versions.models import Version
from mkt.webapps.cron import dump_user_installs_cron
from mkt.webapps.models import AddonUser, Webapp
from mkt.webapps.tasks import (dump_app, export_data,
notify_developers_of_failure, pre_generate_apk,
PreGenAPKError, rm_directory, update_manifests)
original = {
"version": "0.1",
"default_locale": "en-US",
"name": "MozillaBall",
"description": "Exciting Open Web development action!",
"icons": {
"32": "http://test.com/icon-32.png",
"48": "http://test.com/icon-48.png",
"128": "http://test.com/icon-128.png"
},
"installs_allowed_from": [
"*",
],
"locales": {
"de": {
"name": "Mozilla Kugel"
},
"fr": {
"description": "Testing name-less locale"
}
}
}
new = {
"version": "1.0",
"default_locale": "en-US",
"name": "MozillaBall",
"description": "Exciting Open Web development action!",
"icons": {
"32": "http://test.com/icon-32.png",
"48": "http://test.com/icon-48.png",
"128": "http://test.com/icon-128.png"
},
"installs_allowed_from": [
"*",
],
"locales": {
"de": {
"name": "Mozilla Kugel"
},
"fr": {
"description": "Testing name-less locale"
}
},
"developer": {
"name": "Mozilla",
"url": "http://www.mozilla.org/"
}
}
ohash = ('sha256:'
'fc11fba25f251d64343a7e8da4dfd812a57a121e61eb53c78c567536ab39b10d')
nhash = ('sha256:'
'409fbe87dca5a4a7937e3dea27b69cb3a3d68caf39151585aef0c7ab46d8ee1e')
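# ohash and nhash are 'sha256:'-prefixed digests standing in for the content
# hashes of the `original` and `new` manifests above (presumably computed over
# the fetched manifest body); the tests below mock _get_content_hash to return
# one of them, simulating an unchanged vs. updated remote manifest.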
class TestUpdateManifest(mkt.site.tests.TestCase):
fixtures = fixture('user_2519', 'user_999')
def setUp(self):
UserProfile.objects.get_or_create(id=settings.TASK_USER_ID)
# Not using app factory since it creates translations with an invalid
# locale of "en-us".
self.addon = Webapp.objects.create()
self.version = Version.objects.create(addon=self.addon,
_developer_name='Mozilla')
self.file = File.objects.create(
version=self.version, hash=ohash, status=mkt.STATUS_PUBLIC,
filename='%s-%s' % (self.addon.id, self.version.id))
self.addon.name = {
'en-US': 'MozillaBall',
'de': 'Mozilla Kugel',
}
self.addon.status = mkt.STATUS_PUBLIC
self.addon.manifest_url = 'http://nowhere.allizom.org/manifest.webapp'
self.addon.save()
self.addon.update_version()
self.addon.addonuser_set.create(user_id=999)
with public_storage.open(self.file.file_path, 'w') as fh:
fh.write(json.dumps(original))
        # This is the hash that _get_content_hash will be mocked to return,
        # to show that the webapp has been updated.
self._hash = nhash
# Let's use deepcopy so nested dicts are copied as new objects.
self.new = deepcopy(new)
self.content_type = 'application/x-web-app-manifest+json'
req_patcher = mock.patch('mkt.developers.tasks.requests.get')
self.req_mock = req_patcher.start()
self.addCleanup(req_patcher.stop)
self.response_mock = mock.Mock(status_code=200)
self.response_mock.iter_content.return_value = mock.Mock(
next=self._data)
self.response_mock.headers = {'content-type': self.content_type}
self.req_mock.return_value = self.response_mock
validator_patcher = mock.patch('mkt.webapps.tasks.validator')
self.validator = validator_patcher.start()
self.addCleanup(validator_patcher.stop)
self.validator.return_value = {}
@mock.patch('mkt.webapps.tasks._get_content_hash')
def _run(self, _get_content_hash, **kw):
# Will run the task and will act depending upon how you've set hash.
_get_content_hash.return_value = self._hash
update_manifests(ids=(self.addon.pk,), **kw)
def _data(self):
return json.dumps(self.new)
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
@mock.patch('mkt.webapps.models.copy_stored_file')
def test_new_version_not_created(self, _copy_stored_file, _manifest_json):
# Test that update_manifest doesn't create multiple versions/files.
eq_(self.addon.versions.count(), 1)
old_version = self.addon.current_version
old_file = self.addon.get_latest_file()
self._run()
app = self.addon.reload()
version = app.current_version
file_ = app.get_latest_file()
# Test that our new version looks good.
eq_(app.versions.count(), 1)
eq_(version, old_version, 'Version created')
eq_(file_, old_file, 'File created')
path = FileUpload.objects.all()[0].path
_copy_stored_file.assert_called_with(
path, os.path.join(version.path_prefix, file_.filename),
src_storage=private_storage, dst_storage=private_storage)
_manifest_json.assert_called_with(file_)
@mock.patch('mkt.developers.tasks.validator', lambda uid, **kw: None)
def test_version_updated(self):
self._run()
self.new['version'] = '1.1'
self._hash = 'foo'
self._run()
app = self.addon.reload()
eq_(app.versions.latest().version, '1.1')
def test_not_log(self):
self._hash = ohash
self._run()
eq_(ActivityLog.objects.for_apps([self.addon]).count(), 0)
def test_log(self):
self._run()
eq_(ActivityLog.objects.for_apps([self.addon]).count(), 1)
@mock.patch('mkt.webapps.tasks._update_manifest')
def test_pending(self, mock_):
self.addon.update(status=mkt.STATUS_PENDING)
call_command('process_addons', task='update_manifests')
assert mock_.called
def test_pending_updates(self):
"""
PENDING apps don't have a current version. This test makes sure
everything still works in this case.
"""
self.addon.update(status=mkt.STATUS_PENDING)
self._run()
eq_(self.addon.latest_version.reload().version, '1.0')
@mock.patch('mkt.webapps.tasks._update_manifest')
def test_approved(self, mock_):
self.addon.update(status=mkt.STATUS_APPROVED)
call_command('process_addons', task='update_manifests')
assert mock_.called
@mock.patch('mkt.webapps.tasks._update_manifest')
def test_ignore_disabled(self, mock_):
self.addon.update(status=mkt.STATUS_DISABLED)
call_command('process_addons', task='update_manifests')
assert not mock_.called
@mock.patch('mkt.webapps.tasks._update_manifest')
def test_ignore_packaged(self, mock_):
self.addon.update(is_packaged=True)
call_command('process_addons', task='update_manifests')
assert not mock_.called
@mock.patch('mkt.webapps.tasks._update_manifest')
def test_get_webapp(self, mock_):
eq_(self.addon.status, mkt.STATUS_PUBLIC)
call_command('process_addons', task='update_manifests')
assert mock_.called
@mock.patch('mkt.webapps.tasks._fetch_manifest')
@mock.patch('mkt.webapps.tasks.update_manifests.retry')
def test_update_manifest(self, retry, fetch):
fetch.return_value = '{}'
update_manifests(ids=(self.addon.pk,))
assert not retry.called
@mock.patch('mkt.webapps.tasks._fetch_manifest')
@mock.patch('mkt.webapps.tasks.update_manifests.retry')
def test_manifest_fetch_fail(self, retry, fetch):
later = datetime.datetime.now() + datetime.timedelta(seconds=3600)
fetch.side_effect = RuntimeError
update_manifests(ids=(self.addon.pk,))
eq_(retry.call_count, 1)
# Not using assert_called_with b/c eta is a datetime.
eq_(retry.call_args[1]['args'], ([self.addon.pk],))
eq_(retry.call_args[1]['kwargs'], {'check_hash': True,
'retries': {self.addon.pk: 1}})
self.assertCloseToNow(retry.call_args[1]['eta'], later)
eq_(retry.call_args[1]['max_retries'], 5)
eq_(len(mail.outbox), 0)
def test_notify_failure_lang(self):
user1 = UserProfile.objects.get(pk=999)
user2 = UserProfile.objects.get(pk=2519)
AddonUser.objects.create(addon=self.addon, user=user2)
user1.update(lang='de')
user2.update(lang='en')
notify_developers_of_failure(self.addon, 'blah')
eq_(len(mail.outbox), 2)
ok_(u'Mozilla Kugel' in mail.outbox[0].subject)
ok_(u'MozillaBall' in mail.outbox[1].subject)
def test_notify_failure_with_rereview(self):
RereviewQueue.flag(self.addon, mkt.LOG.REREVIEW_MANIFEST_CHANGE,
'This app is flagged!')
notify_developers_of_failure(self.addon, 'blah')
eq_(len(mail.outbox), 0)
def test_notify_failure_not_public(self):
self.addon.update(status=mkt.STATUS_PENDING)
notify_developers_of_failure(self.addon, 'blah')
eq_(len(mail.outbox), 0)
@mock.patch('mkt.webapps.tasks._fetch_manifest')
@mock.patch('mkt.webapps.tasks.update_manifests.retry')
def test_manifest_fetch_3rd_attempt(self, retry, fetch):
fetch.side_effect = RuntimeError
update_manifests(ids=(self.addon.pk,), retries={self.addon.pk: 2})
        # We already tried twice before; this is the 3rd attempt, so we
        # should notify the developer that something is wrong.
eq_(len(mail.outbox), 1)
msg = mail.outbox[0]
ok_(msg.subject.startswith('Issue with your app'))
expected = u'Failed to get manifest from %s' % self.addon.manifest_url
ok_(expected in msg.body)
ok_(settings.SUPPORT_GROUP in msg.body)
# We should have scheduled a retry.
assert retry.called
# We shouldn't have put the app in the rereview queue yet.
assert not RereviewQueue.objects.filter(addon=self.addon).exists()
@mock.patch('mkt.webapps.tasks._fetch_manifest')
@mock.patch('mkt.webapps.tasks.update_manifests.retry')
@mock.patch('mkt.webapps.tasks.notify_developers_of_failure')
def test_manifest_fetch_4th_attempt(self, notify, retry, fetch):
fetch.side_effect = RuntimeError
update_manifests(ids=(self.addon.pk,), retries={self.addon.pk: 3})
        # We already tried 3 times before; this is the 4th and last attempt,
        # so we shouldn't retry anymore. Instead we should just add the app to
        # the re-review queue. We shouldn't notify the developer at this step
        # either; that should have been done before already.
assert not notify.called
assert not retry.called
assert RereviewQueue.objects.filter(addon=self.addon).exists()
@mock.patch('mkt.webapps.models.Webapp.set_iarc_storefront_data')
def test_manifest_validation_failure(self, _iarc):
# We are already mocking validator, but this test needs to make sure
# it actually saves our custom validation result, so add that.
def side_effect(upload_id, **kwargs):
upload = FileUpload.objects.get(pk=upload_id)
upload.validation = json.dumps(validation_results)
upload.save()
validation_results = {
'errors': 1,
'messages': [{
'context': None,
'uid': 'whatever',
'column': None,
'id': ['webapp', 'detect_webapp', 'parse_error'],
'file': '',
'tier': 1,
'message': 'JSON Parse Error',
'type': 'error',
'line': None,
'description': 'The webapp extension could not be parsed due '
'to a syntax error in the JSON.'
}]
}
self.validator.side_effect = side_effect
eq_(RereviewQueue.objects.count(), 0)
self._run()
eq_(RereviewQueue.objects.count(), 1)
eq_(len(mail.outbox), 1)
msg = mail.outbox[0]
upload = FileUpload.objects.get()
validation_url = absolutify(reverse(
'mkt.developers.upload_detail', args=[upload.uuid]))
ok_(msg.subject.startswith('Issue with your app'))
ok_(validation_results['messages'][0]['message'] in msg.body)
ok_(validation_url in msg.body)
ok_(not _iarc.called)
@mock.patch('mkt.webapps.models.Webapp.set_iarc_storefront_data')
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_manifest_name_change_rereview(self, _manifest, _iarc):
# Mock original manifest file lookup.
_manifest.return_value = original
# Mock new manifest with name change.
self.new['name'] = 'Mozilla Ball Ultimate Edition'
eq_(RereviewQueue.objects.count(), 0)
self._run()
eq_(RereviewQueue.objects.count(), 1)
# 2 logs: 1 for manifest update, 1 for re-review trigger.
eq_(ActivityLog.objects.for_apps([self.addon]).count(), 2)
ok_(_iarc.called)
@mock.patch('mkt.webapps.models.Webapp.set_iarc_storefront_data')
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_manifest_locale_name_add_rereview(self, _manifest, _iarc):
# Mock original manifest file lookup.
_manifest.return_value = original
# Mock new manifest with name change.
self.new['locales'] = {'es': {'name': 'eso'}}
eq_(RereviewQueue.objects.count(), 0)
self._run()
eq_(RereviewQueue.objects.count(), 1)
# 2 logs: 1 for manifest update, 1 for re-review trigger.
eq_(ActivityLog.objects.for_apps([self.addon]).count(), 2)
log = ActivityLog.objects.filter(
action=mkt.LOG.REREVIEW_MANIFEST_CHANGE.id)[0]
eq_(log.details.get('comments'),
u'Locales added: "eso" (es).')
ok_(not _iarc.called)
@mock.patch('mkt.webapps.models.Webapp.set_iarc_storefront_data')
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_manifest_locale_name_change_rereview(self, _manifest, _iarc):
# Mock original manifest file lookup.
_manifest.return_value = original
# Mock new manifest with name change.
self.new['locales'] = {'de': {'name': 'Bippity Bop'}}
eq_(RereviewQueue.objects.count(), 0)
self._run()
eq_(RereviewQueue.objects.count(), 1)
# 2 logs: 1 for manifest update, 1 for re-review trigger.
eq_(ActivityLog.objects.for_apps([self.addon]).count(), 2)
log = ActivityLog.objects.filter(
action=mkt.LOG.REREVIEW_MANIFEST_CHANGE.id)[0]
eq_(log.details.get('comments'),
u'Locales updated: "Mozilla Kugel" -> "Bippity Bop" (de).')
ok_(not _iarc.called)
@mock.patch('mkt.webapps.models.Webapp.set_iarc_storefront_data')
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_manifest_default_locale_change(self, _manifest, _iarc):
# Mock original manifest file lookup.
_manifest.return_value = original
# Mock new manifest with name change.
self.new['name'] = u'Mozilla Balón'
self.new['default_locale'] = 'es'
self.new['locales'] = {'en-US': {'name': 'MozillaBall'}}
eq_(RereviewQueue.objects.count(), 0)
self._run()
eq_(RereviewQueue.objects.count(), 1)
eq_(self.addon.reload().default_locale, 'es')
# 2 logs: 1 for manifest update, 1 for re-review trigger.
eq_(ActivityLog.objects.for_apps([self.addon]).count(), 2)
log = ActivityLog.objects.filter(
action=mkt.LOG.REREVIEW_MANIFEST_CHANGE.id)[0]
eq_(log.details.get('comments'),
u'Manifest name changed from "MozillaBall" to "Mozilla Balón". '
u'Default locale changed from "en-US" to "es". '
u'Locales added: "Mozilla Balón" (es).')
ok_(_iarc.called)
@mock.patch('mkt.webapps.models.Webapp.set_iarc_storefront_data')
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_manifest_locale_name_removal_no_rereview(self, _manifest, _iarc):
# Mock original manifest file lookup.
_manifest.return_value = original
# Mock new manifest with name change.
# Note: Not using `del` b/c copy doesn't copy nested structures.
self.new['locales'] = {
'fr': {'description': 'Testing name-less locale'}
}
eq_(RereviewQueue.objects.count(), 0)
self._run()
eq_(RereviewQueue.objects.count(), 0)
# Log for manifest update.
eq_(ActivityLog.objects.for_apps([self.addon]).count(), 1)
ok_(not _iarc.called)
@mock.patch('mkt.webapps.models.Webapp.set_iarc_storefront_data')
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_force_rereview(self, _manifest, _iarc):
# Mock original manifest file lookup.
_manifest.return_value = original
# Mock new manifest with name change.
self.new['name'] = 'Mozilla Ball Ultimate Edition'
# We're setting the hash to the same value.
self.file.update(hash=nhash)
eq_(RereviewQueue.objects.count(), 0)
self._run(check_hash=False)
# We should still get a rereview since we bypassed the manifest check.
eq_(RereviewQueue.objects.count(), 1)
# 2 logs: 1 for manifest update, 1 for re-review trigger.
eq_(ActivityLog.objects.for_apps([self.addon]).count(), 2)
ok_(_iarc.called)
@mock.patch('mkt.webapps.models.Webapp.set_iarc_storefront_data')
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_manifest_support_locales_change(self, _manifest, _iarc):
"""
Test both PUBLIC and PENDING to catch apps w/o `current_version`.
"""
for status in (mkt.STATUS_PUBLIC, mkt.STATUS_PENDING):
self.addon.update(status=status)
# Mock original manifest file lookup.
_manifest.return_value = original
# Mock new manifest with name change.
self.new['locales'].update({'es': {'name': u'Mozilla Balón'}})
self._run()
ver = self.version.reload()
eq_(ver.supported_locales, 'de,es,fr')
ok_(not _iarc.called)
@mock.patch('mkt.webapps.models.Webapp.set_iarc_storefront_data')
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_manifest_support_developer_change(self, _manifest, _iarc):
# Mock original manifest file lookup.
_manifest.return_value = original
# Mock new manifest with developer name change.
self.new['developer']['name'] = 'Allizom'
self._run()
ver = self.version.reload()
eq_(ver.developer_name, 'Allizom')
# We should get a re-review because of the developer name change.
eq_(RereviewQueue.objects.count(), 1)
# 2 logs: 1 for manifest update, 1 for re-review trigger.
eq_(ActivityLog.objects.for_apps([self.addon]).count(), 2)
ok_(_iarc.called)
class TestDumpApps(mkt.site.tests.TestCase):
fixtures = fixture('webapp_337141')
def test_dump_app(self):
path = dump_app(337141)
with private_storage.open(path, 'r') as fd:
result = json.load(fd)
eq_(result['id'], 337141)
class TestDumpUserInstalls(mkt.site.tests.TestCase):
fixtures = fixture('user_2519', 'webapp_337141')
def setUp(self):
super(TestDumpUserInstalls, self).setUp()
# Create a user install.
self.app = Webapp.objects.get(pk=337141)
self.user = UserProfile.objects.get(pk=2519)
self.app.installed.create(user=self.user)
self.export_directory = mkdtemp()
self.hash = hashlib.sha256('%s%s' % (str(self.user.pk),
settings.SECRET_KEY)).hexdigest()
self.path = os.path.join('users', self.hash[0], '%s.json' % self.hash)
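        # The dump keys each user by sha256(pk + SECRET_KEY) rather than the
        # raw pk, and shards the JSON files into directories named after the
        # first hex character of that hash, e.g. users/a/a3f...json.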
self.tarfile = None
self.tarfile_file = None
def tearDown(self):
rm_directory(self.export_directory)
if self.tarfile:
self.tarfile.close()
if self.tarfile_file:
self.tarfile_file.close()
super(TestDumpUserInstalls, self).tearDown()
def _test_export_is_created(self):
expected_files = [
'license.txt',
'readme.txt',
]
actual_files = self.tarfile.getnames()
for expected_file in expected_files:
assert expected_file in actual_files, expected_file
def create_export(self):
date = datetime.datetime.today().strftime('%Y-%m-%d')
with self.settings(DUMPED_USERS_PATH=self.export_directory):
dump_user_installs_cron()
tarball_path = os.path.join(self.export_directory,
'tarballs',
date + '.tgz')
self.tarfile_file = private_storage.open(tarball_path)
self.tarfile = tarfile.open(fileobj=self.tarfile_file)
return self.tarfile
def dump_and_load(self):
self.create_export()
self._test_export_is_created()
return json.load(self.tarfile.extractfile(self.path))
def test_dump_user_installs(self):
data = self.dump_and_load()
eq_(data['user'], self.hash)
eq_(data['region'], self.user.region)
eq_(data['lang'], self.user.lang)
installed = data['installed_apps'][0]
eq_(installed['id'], self.app.id)
eq_(installed['slug'], self.app.app_slug)
self.assertCloseToNow(
datetime.datetime.strptime(installed['installed'],
'%Y-%m-%dT%H:%M:%S'),
datetime.datetime.utcnow())
    def test_dump_excludes_deleted(self):
"""We can't recommend deleted apps, so don't include them."""
app = app_factory()
app.installed.create(user=self.user)
app.delete()
data = self.dump_and_load()
eq_(len(data['installed_apps']), 1)
installed = data['installed_apps'][0]
eq_(installed['id'], self.app.id)
def test_dump_recommendation_opt_out(self):
self.user.update(enable_recommendations=False)
with self.assertRaises(KeyError):
# File shouldn't exist b/c we didn't write it.
self.dump_and_load()
@mock.patch('mkt.webapps.tasks.requests')
class TestPreGenAPKs(mkt.site.tests.WebappTestCase):
def setUp(self):
super(TestPreGenAPKs, self).setUp()
self.manifest_url = u'http://some-âpp.net/manifest.webapp'
self.app.update(manifest_url=self.manifest_url)
def test_get(self, req):
res = mock.Mock()
req.get.return_value = res
pre_generate_apk.delay(self.app.id)
assert req.get.called, 'APK requested from factory'
assert req.get.mock_calls[0].startswith(
settings.PRE_GENERATE_APK_URL), req.get.mock_calls
assert res.raise_for_status.called, 'raise on bad status codes'
def test_get_packaged(self, req):
self.app.update(manifest_url=None, is_packaged=True)
# Make sure this doesn't raise an exception.
pre_generate_apk.delay(self.app.id)
assert req.get.called, 'APK requested from factory'
def test_no_manifest(self, req):
self.app.update(manifest_url=None)
with self.assertRaises(PreGenAPKError):
pre_generate_apk.delay(self.app.id)
def test_error_getting(self, req):
req.get.side_effect = RequestException
with self.assertRaises(PreGenAPKError):
pre_generate_apk.delay(self.app.id)
class TestExportData(mkt.site.tests.TestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
self.export_directory = mkdtemp()
self.existing_tarball = os.path.join(
self.export_directory, 'tarballs', '2004-08-15')
with public_storage.open(self.existing_tarball, 'w') as fd:
fd.write('.')
self.app_path = 'apps/337/337141.json'
self.tarfile_file = None
self.tarfile = None
def tearDown(self):
rm_directory(self.export_directory)
if self.tarfile:
self.tarfile.close()
if self.tarfile_file:
self.tarfile_file.close()
super(TestExportData, self).tearDown()
def create_export(self, name):
with self.settings(DUMPED_APPS_PATH=self.export_directory):
export_data(name=name)
tarball_path = os.path.join(self.export_directory,
'tarballs',
name + '.tgz')
self.tarfile_file = public_storage.open(tarball_path)
self.tarfile = tarfile.open(fileobj=self.tarfile_file)
return self.tarfile
def test_export_is_created(self):
expected_files = [
self.app_path,
'license.txt',
'readme.txt',
]
tarball = self.create_export('tarball-name')
actual_files = tarball.getnames()
for expected_file in expected_files:
assert expected_file in actual_files, expected_file
# Make sure we didn't touch old tarballs by accident.
assert public_storage.exists(self.existing_tarball)
@mock.patch('mkt.webapps.tasks.dump_app')
def test_not_public(self, dump_app):
app = Webapp.objects.get(pk=337141)
app.update(status=mkt.STATUS_PENDING)
self.create_export('tarball-name')
assert not dump_app.called
def test_removed(self):
# At least one public app must exist for dump_apps to run.
app_factory(name='second app', status=mkt.STATUS_PUBLIC)
app_path = os.path.join(self.export_directory, self.app_path)
app = Webapp.objects.get(pk=337141)
app.update(status=mkt.STATUS_PUBLIC)
self.create_export('tarball-name')
assert private_storage.exists(app_path)
app.update(status=mkt.STATUS_PENDING)
self.create_export('tarball-name')
assert not private_storage.exists(app_path)
@mock.patch('mkt.webapps.tasks.dump_app')
def test_public(self, dump_app):
self.create_export('tarball-name')
assert dump_app.called
|
washort/zamboni
|
mkt/webapps/tests/test_tasks.py
|
Python
|
bsd-3-clause
| 27,242
|
[
"exciting"
] |
36479b61bfb382d326f474970d6507666f834b5e81627ad197e9bec0290d8bbe
|
from __future__ import annotations
import os
import procrunner
import pytest
from dxtbx.model.experiment_list import ExperimentListFactory
def plot_beam_centre_error(ideal_bc, obs_bc):
import matplotlib.pyplot as plt
ideal_x, ideal_y = zip(*ideal_bc)
obs_x, obs_y = zip(*obs_bc)
del_x = [a - b for a, b in zip(obs_x, ideal_x)]
del_y = [a - b for a, b in zip(obs_y, ideal_y)]
scan_points = range(len(ideal_x))
plt.plot(scan_points, del_x, scan_points, del_y)
plt.xlabel("Scan point")
plt.ylabel("Beam centre residual (obs - ideal) (pixels)")
plt.show()
def test_refinement_and_compare_with_known_truth(dials_regression, run_in_tmpdir):
# use data generated by simulation for this test
data_dir = os.path.join(
dials_regression, "refinement_test_data", "varying_beam_direction"
)
experiments_path = os.path.join(data_dir, "refined_static.json")
pickle_path = os.path.join(data_dir, "refined_static.pickle")
for pth in (experiments_path, pickle_path):
assert os.path.exists(pth)
# Run refinement and load models
result = procrunner.run(
[
"dials.refine",
experiments_path,
pickle_path,
"scan_varying=True",
"crystal.orientation.force_static=True",
"crystal.unit_cell.force_static=True",
"beam.force_static=False",
"beam.fix=wavelength",
]
)
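    # The options above request scan-varying refinement in which the crystal
    # orientation and unit cell are kept static, the beam direction is allowed
    # to vary over the scan, and the wavelength is fixed.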
assert not result.returncode and not result.stderr
exp = ExperimentListFactory.from_json_file("refined.expt", check_format=False)[0]
beam, detector = exp.beam, exp.detector
# Beam centre at every scan-point
s0_scan_points = [beam.get_s0_at_scan_point(i) for i in range(beam.num_scan_points)]
bc_scan_points = [detector[0].get_beam_centre_px(s0) for s0 in s0_scan_points]
# Set up the nanoBragg object as used in the simulation
from .sim_images import Simulation
sim = Simulation()
sim.set_varying_beam(along="both")
# Simulation beam centre at every scan-point
sim_s0_scan_points = [
sim.beam.get_s0_at_scan_point(i) for i in range(sim.beam.num_scan_points)
]
sim_bc_scan_points = [
sim.detector[0].get_beam_centre_px(s0) for s0 in sim_s0_scan_points
]
assert beam.num_scan_points == sim.beam.num_scan_points
# Generate a plot. This shows that the beam centre is worse at the edges of
# the scan. This is what we expect as the centroids at the scan edges are
# least well determined because of the truncation of found spots. At these
# edges the error approaches 0.15 pixels, whilst in the central region of
# the scan it is within 0.05 pixels.
#
# plot_beam_centre_error(sim_bc_scan_points, bc_scan_points)
# Compare the results.
for bc1, bc2 in zip(sim_bc_scan_points, bc_scan_points):
assert bc2 == pytest.approx(bc1, abs=0.15)
|
dials/dials
|
tests/algorithms/refinement/test_scan_varying_beam_refinement.py
|
Python
|
bsd-3-clause
| 2,914
|
[
"CRYSTAL"
] |
14df39e6f066a819c32ec722e1c167b74f2491204b41b81686c41e7a2cb7805f
|
import os
from ase import Atom, Atoms
from ase.units import Bohr
from gpaw import GPAW
from gpaw.test import equal
a = 7.5 * Bohr
n = 16
atoms = Atoms([Atom('He', (0.0, 0.0, 0.0))], cell=(a, a, a), pbc=True)
calc = GPAW(gpts=(n, n, n), nbands=1, xc='LDA')
atoms.set_calculator(calc)
e1 = atoms.get_potential_energy()
niter1 = calc.get_number_of_iterations()
e1ref = calc.get_reference_energy()
de12 = calc.get_xc_difference('PBE')
calc.set(xc='PBE')
e2 = atoms.get_potential_energy()
niter2 = calc.get_number_of_iterations()
e2ref = calc.get_reference_energy()
de21 = calc.get_xc_difference('LDA')
print e1ref + e1 + de12, e2ref + e2
print e1ref + e1, e2ref + e2 + de21
print de12, de21
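# The two checks below compare self-consistent totals with non-self-consistent
# estimates: reference energy + LDA energy + the PBE correction evaluated on
# the LDA density should roughly equal the self-consistent PBE total, and vice
# versa for the LDA correction evaluated on the PBE density.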
equal(e1ref + e1 + de12, e2ref + e2, 0.02)
equal(e1ref + e1, e2ref + e2 + de21, 0.025)
calc.write('PBE.gpw')
de21b = GPAW('PBE.gpw').get_xc_difference('LDA')
print de21, de21b
equal(de21, de21b, 9e-8)
energy_tolerance = 0.00007
niter_tolerance = 0
equal(e1, -0.0961003634812, energy_tolerance) # svnversion 5252
equal(niter1, 14, niter_tolerance) # svnversion 5252
equal(e2, -0.0790249564625, energy_tolerance) # svnversion 5252
equal(niter2, 12, niter_tolerance) # svnversion 5252
|
qsnake/gpaw
|
gpaw/test/nonselfconsistentLDA.py
|
Python
|
gpl-3.0
| 1,173
|
[
"ASE",
"GPAW"
] |
a3226edb78a4a63fa88d9fa3c17a5eca8982b0983618265ef2a4da0e8f952df7
|
#TFLOW Segment: Find Gene Annotations for Query Sequence File using Reference Nucleotide or
# Protein Sequence File
#
#Dan Stribling
#Florida State University
#Center for Genomics and Personalized Medicine
#Version 0.9, 05/22/2015
#Project URL: http://www.github.com/fsugenomics/tflow
import os.path
import sys
import subprocess
import shutil
import gzip
REFERENCE_TYPES = {'Protein':'prot', 'protein':'prot', 'prot':'prot',
'Nucleotide':'nucl', 'nucleotide':'nucl', 'nucl':'nucl'}
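# The values of REFERENCE_TYPES are the two database types accepted by
# makeblastdb's -dbtype flag ('prot' or 'nucl'); the keys just let users spell
# the setting in a few different ways in their configuration.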
if __name__ == "__main__" or __package__ is None:
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../'))
import tflow.segments
__package__ = "tflow.segments"
from .parser_class import OutputParser
from ..util import print_exit, print_return
from .. import util
from .. import local_settings
from ..annotation import (Annotation, Annotation_Record, Annotation_Database,
Annotation_Map, Name_Map)
if hasattr(local_settings, 'BLAST_LOCATION'):
BLAST_LOCATION = local_settings.BLAST_LOCATION
else:
BLAST_LOCATION = ''
if hasattr(local_settings, 'BLAST_EXEC'):
BLAST_EXEC = local_settings.BLAST_EXEC
else:
BLAST_EXEC = os.path.join(BLAST_LOCATION, 'blastx')
if hasattr(local_settings, 'MAKE_BLAST_DB_LOCATION'):
MAKE_BLAST_DB_LOCATION = local_settings.MAKE_BLAST_DB_LOCATION
else:
MAKE_BLAST_DB_LOCATION = ''
if hasattr(local_settings, 'MAKE_BLAST_DB_EXEC'):
MAKE_BLAST_DB_EXEC = local_settings.MAKE_BLAST_DB_EXEC
else:
MAKE_BLAST_DB_EXEC = os.path.join(MAKE_BLAST_DB_LOCATION, 'makeblastdb')
JOB_TYPE = 'Prototype_Find_Annotations'
PROGRAM_URL = 'http://blast.ncbi.nlm.nih.gov/Blast.cgi?PAGE_TYPE=BlastDocs&DOC_TYPE=Download'
SEGMENT_FOR_VERSION = '2.2.29'
BLAST_COMMAND = BLAST_EXEC
BLAST_COMMAND_LIST = [BLAST_COMMAND]
BLAST_DB_COMMAND = MAKE_BLAST_DB_EXEC
BLAST_DB_COMMAND_LIST = [BLAST_DB_COMMAND]
TEST_COMMAND = '-h'
OUT_FILE = 'Prototype_Find_Annotations.out'
MILESTONES = ['Annotation Complete']
TERMINAL_FLAGS = []
FAILURE_FLAGS = ['Exiting Early...',
'Traceback',
'Exception: ERROR',
'Not Found',
]
MATCH_PREFIX = 'Matches'
ANNOTATION_PREFIX = 'Annotations'
DEFAULT_SETTINGS = {'copy_input_file':False,
'max_CPU':'4',
'blast_evalue':'1e-5',
'evalue_cutoffs':['1e-10', '1e-20', '1e-40'],
'blast_result_file':'blast.out',
'db_title':'BLAST_DB',
'max_matches':'5',
'write_all_matches':True,
'write_best_matches':True,
'verbose_tracking':True,
#'reference_type':'nucl',
#TFLOW Settings
'blast_command':BLAST_COMMAND,
'blast_command_list':BLAST_COMMAND_LIST,
'blast_db_command':BLAST_DB_COMMAND,
'blast_db_command_list':BLAST_DB_COMMAND_LIST,
'test_command':TEST_COMMAND,
'program_URL':PROGRAM_URL,
'segment_for_version':SEGMENT_FOR_VERSION,
#TFLOW Writing Defaults, Used if Global Not Set
'write_report':True,
'write_command':True,
'write_pid':True,
}
REQUIRED_SETTINGS = ['blast_command_list', 'blast_db_command_list', 'working_directory',
'copy_input_file', 'blast_evalue', 'max_CPU', 'blast_result_file',
'evalue_cutoffs', 'write_command', 'write_report', 'write_pid',
'reference_type', 'max_matches']
REQUIRED_ANALYSIS_SETTINGS = ['blast_result_file', 'evalue_cutoffs', 'write_report',
'write_all_matches', 'write_best_matches']
OPTIONAL_TRACKING_SETTINGS = ['working_directory', 'blast_result_file']
class Parser(OutputParser):
def set_local_defaults(self):
self.milestones = MILESTONES
self.terminal_flags = TERMINAL_FLAGS
self.failure_flags = FAILURE_FLAGS
self.job_type = JOB_TYPE
def check_queries_processed(self, blast_file_name):
count = 0
if self.shell_tracking:
try:
string_count = subprocess.check_output(['grep', '-c', '# Query:',
blast_file_name])
count = int(string_count.strip())
except subprocess.CalledProcessError:
util.print_warning('Problem with Shell Query Checking? Continuing...')
else:
with open(blast_file_name, 'r') as blast_file:
for line in blast_file:
if line.startswith('# Query:'):
count += 1
return count
def annotation_track(self, options, loud=False):
from time import sleep
if 'verbose_tracking' in options and options['verbose_tracking']:
verbose_tracking = True
#Ensure Required Settings in Options
for optional_option in OPTIONAL_TRACKING_SETTINGS:
if optional_option not in options:
print 'Optional Option: %s for %s tracking not given.' % (optional_option,
JOB_TYPE)
print 'Defaulting to non-verbose tracking.'
verbose_tracking = False
else:
verbose_tracking = False
if verbose_tracking:
print 'Attempting to Initiate Verbose Annotation Tracking:'
print ''
#Ensure Working Directory Exists
if verbose_tracking and not os.path.isdir(options['working_directory']):
print 'Working Directory: %s Not Found.' % options['working_directory']
print 'Defaulting to non-verbose tracking.'
verbose_tracking = False
#Ensure A Type of Query Sequence File is Given
if verbose_tracking and not any(x in options for x in ['absolute_input_analysis_file',
'rel_input_analysis_file',
'input_analysis_file',
'result_name_file']):
print ('Either input_analysis_file, absolute_input_analysis_file,'
                   + ' rel_input_analysis_file, or result_name_file parameter required.')
print 'Defaulting to non-verbose tracking.'
verbose_tracking = False
#Assign Correct Input File Name
if verbose_tracking:
if 'input_analysis_file' in options:
if os.path.isabs(options['input_analysis_file']):
full_input_file = options['input_analysis_file']
else:
full_input_file = os.path.join(options['working_directory'],
options['input_analysis_file'])
elif 'absolute_input_analysis_file' in options:
full_input_file = options['absolute_input_analysis_file']
elif 'rel_input_analysis_file' in options:
full_input_file = os.path.join(options['project_directory'],
options['rel_input_analysis_file'])
elif 'result_name_file' in options:
full_result_name_file = os.path.join(options['project_directory'],
options['result_name_file'])
if not os.path.isfile(full_result_name_file):
print('Provided File: %s Containing' % full_result_name_file
+ ' Result Sequence File Name Not Found.')
print 'Defaulting to non-verbose tracking.'
verbose_tracking = False
if verbose_tracking:
rf = open(full_result_name_file, 'r')
full_input_file = rf.read().strip()
rf.close()
if not os.path.isfile(full_input_file):
print('Cannot Find Read Result Sequence File: %s' % full_input_file)
print 'Defaulting to non-verbose tracking.'
verbose_tracking = False
if verbose_tracking:
input_file = os.path.basename(full_input_file)
if not any(x(full_input_file) for x in [util.is_FASTA, util.is_FASTA_GZ]):
print 'File: %s is not a FASTA File.' % full_input_file
print 'Defaulting to non-verbose tracking.'
verbose_tracking = False
if verbose_tracking:
#Set BLAST Output File
blast_file = options['blast_result_file']
full_blast_file = os.path.join(options['working_directory'], blast_file)
#Check that Blast File Exists
if not os.path.isfile(full_blast_file):
                print 'BLAST Result File: %s Not Found.' % full_blast_file
print 'Defaulting to non-verbose tracking.'
verbose_tracking = False
if verbose_tracking:
num_query_sequences = util.count_FASTA_all(full_input_file)
print 'Verbose Tracking Successfully Initiated.'
print 'Query Sequences:', num_query_sequences
else:
num_query_sequences = 0
num_queries_processed = 0
new_queries_processed = 0
while self.running:
if verbose_tracking:
new_queries_processed = self.check_queries_processed(full_blast_file)
if self.check_updated() or num_queries_processed != new_queries_processed:
#print 'Updated:', self.check_updated()
#print 'Num_Queries_New:', num_queries_processed != new_queries_processed
self.running = self.check(loud)
if verbose_tracking:
print new_queries_processed, '/', num_query_sequences,
print '(%s)' % util.percent_string(new_queries_processed,
num_query_sequences),
print 'Query Sequences Processed.'
num_queries_processed = new_queries_processed
if self.running:
sleep(self.sleep_time)
def check_done(options):
parser = Parser()
parser.out_file = options['out_file']
failure_exit = (options['mode'] in ['run', 'track'])
return parser.check_completion(failure_exit)
def track(options):
parser = Parser()
parser.out_file = options['out_file']
parser.annotation_track(options)
def read(options):
parser = Parser()
parser.out_file = options['out_file']
parser.read_or_notify()
def stop(options):
job_pid_file = os.path.join(options['working_directory'],
JOB_TYPE + '.auto.pid')
util.stop_TFLOW_process(job_pid_file, JOB_TYPE)
def clean(options):
suffixes = ['.auto.blastx.sh', '.auto.make_db.sh']
files = []
out_files = [options['blast_result_file'], MATCH_PREFIX+'.All.annDB',
MATCH_PREFIX+'.Best.annDB', ANNOTATION_PREFIX+'.All.annDB']
if 'evalue_cutoffs' in options and isinstance(options['evalue_cutoffs'], list):
for evalue_cutoff in options['evalue_cutoffs']:
out_files.append(ANNOTATION_PREFIX + '.' + evalue_cutoff + '.annDB')
if 'db_title' in options:
for suffix in ['.phr', '.pin', '.psq']:
files.append(options['db_title'] + suffix)
remove_outfile = (options['mode'] == 'reset')
util.clean_TFLOW_auto_files(options['job_type'], options['project_directory'],
options['working_directory'], remove_outfile=remove_outfile,
confirm=options['confirm'], suffixes=suffixes, files=files,
out_files=out_files)
def test(options, silent=False):
all_output = ''
for job_type, command_list in [(JOB_TYPE+':BLAST', 'blast_command_list'),
(JOB_TYPE+':Make_Blast_DB', 'blast_db_command_list')]:
try:
process = subprocess.Popen(options[command_list] + [options['test_command']],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
process.wait()
output, error = process.communicate()
all_output += output
print ' -- %s Found!' % job_type
except OSError as error:
if silent:
return False
print ('%s Cannot Be Found ' % job_type
+ ' With Shell Command: "%s"' % ' '.join(options[command_list]))
if PROGRAM_URL:
print 'If Not Installed, %s Can be Downloaded From:\n%s' % (JOB_TYPE, PROGRAM_URL)
all_output += 'Error Number: %s\nError Text:\n%s' % (str(error.errno), error.strerror)
return all_output
def run(options):
if __name__ != '__main__' and options['is_pipe']:
out_file_stream = open(options['out_file'], 'w')
terminal_out, terminal_error = sys.stdout, sys.stderr
sys.stdout, sys.stderr = out_file_stream, out_file_stream
#Ensure Required Settings in Options
for required_option in REQUIRED_SETTINGS:
if required_option not in options:
print_exit('Required Option: %s for %s not given.' % (required_option, JOB_TYPE))
#Ensure A Type of Query Sequence File is Given
if not any(x in options for x in ['absolute_input_analysis_file',
'rel_input_analysis_file',
'input_analysis_file',
'result_name_file']):
print_exit('Either input_analysis_file, absolute_input_analysis_file,'
                   + ' rel_input_analysis_file, or result_name_file parameter required.')
#Ensure A Type of Reference Sequence File is Given
if not any(x in options for x in ['absolute_input_reference_file',
'rel_input_reference_file',
                                      'input_reference_file',
                                      'reference_name_file']):
print_exit('Either input_reference_file, absolute_input_reference_file,'
                   + ' rel_input_reference_file, or reference_name_file parameter required.')
#Ensure Working Directory Exists
if not os.path.isdir(options['working_directory']):
print_exit('Working Directory: %s Not Found.' % options['working_directory'])
#Assign Correct Input File Name
if 'input_analysis_file' in options:
if os.path.isabs(options['input_analysis_file']):
full_input_file = options['input_analysis_file']
else:
full_input_file = os.path.join(options['working_directory'],
options['input_analysis_file'])
elif 'absolute_input_analysis_file' in options:
full_input_file = options['absolute_input_analysis_file']
elif 'rel_input_analysis_file' in options:
full_input_file = os.path.join(options['project_directory'],
options['rel_input_analysis_file'])
elif 'result_name_file' in options:
full_result_name_file = os.path.join(options['project_directory'],
options['result_name_file'])
if os.path.isfile(full_result_name_file):
print ('Reading Result Sequence File Name from Provided '
+ 'File: %s' % full_result_name_file )
else:
print_exit('Provided File: %s Containing' % full_result_name_file
+ ' Result Sequence File Name Not Found.')
rf = open(full_result_name_file, 'r')
full_input_file = rf.read().strip()
rf.close()
if os.path.isfile(full_input_file):
print 'Read Result Sequence File Name: %s' % full_input_file
print 'File Found!'
print ''
else:
print_exit('Cannot Find Read Result Sequence File: %s' % full_input_file)
input_file = os.path.basename(full_input_file)
#Assign Correct Reference File Name
if 'input_reference_file' in options:
if os.path.isabs(options['input_reference_file']):
full_reference_file = options['input_reference_file']
else:
full_reference_file = os.path.join(options['working_directory'],
                                               options['input_reference_file'])
elif 'absolute_input_reference_file' in options:
full_reference_file = options['absolute_input_reference_file']
elif 'rel_input_reference_file' in options:
full_reference_file = os.path.join(options['project_directory'],
options['rel_input_reference_file'])
elif 'reference_name_file' in options:
full_reference_name_file = os.path.join(options['project_directory'],
options['reference_name_file'])
if os.path.isfile(full_reference_name_file):
print ('Reading Reference Sequence File Name from Provided '
+ 'File: %s' % full_reference_name_file )
else:
print_exit('Provided File: %s Containing' % full_reference_name_file
+ ' Reference Sequence File Name Not Found.')
        rf = open(full_reference_name_file, 'r')
full_reference_file = rf.read().strip()
rf.close()
if os.path.isfile(full_reference_file):
print 'Read Result Reference Sequence File Name: %s' % full_reference_file
print 'File Found!'
print ''
else:
print_exit('Cannot Find Read Reference Sequence File: %s' % full_reference_file)
reference_file = os.path.basename(full_reference_file)
#If Selected Reference File is Zipped, Unzip it
    if (full_reference_file.endswith('.gz') and os.path.isfile(full_reference_file)
        and not os.path.isfile(full_reference_file[:-3])):
        print '\nSelected Reference File: %s is Zipped.' % full_reference_file
        print 'Unzipping...'
        print ''
        sys.stdout.flush()
        with gzip.open(full_reference_file, 'r') as zipped_reference:
            with open(full_reference_file[:-3], 'w') as unzipped_reference:
                unzipped_reference.writelines(zipped_reference)
        print ('Unzipping Complete. Setting Reference File to '
               + 'Unzipped File: %s' % full_reference_file[:-3])
        print ''
        full_reference_file = full_reference_file[:-3]
#Check that Input File Exists
if not os.path.isfile(full_input_file):
print_exit('Input Sequence File: %s Not Found.' % full_input_file)
#Check that Reference File Exists
if not os.path.isfile(full_reference_file):
print_exit('Input Reference Sequence File: %s Not Found.' % full_reference_file)
#If Selected, Copy Input File to Working Directory
if options['copy_input_file']:
print ('Copying Input File: %s' % input_file
+ ' to Working Directory: %s' % options['working_directory'])
working_input_file = os.path.join(options['working_directory'], input_file)
shutil.copyfile(full_input_file, working_input_file)
if not os.path.isfile(working_input_file):
            print_exit('Copying of File: %s to Name: %s Unsuccessful.' % (full_input_file,
working_input_file))
else:
print 'Using Input File: %s' % full_input_file
working_input_file = full_input_file
print ('Input File Has %i ' % util.count_FASTA_all(working_input_file) +
' Detected Query Sequences.')
if options['reference_type'] in REFERENCE_TYPES:
print 'Reference Type: %s (%s) Selected.' % (options['reference_type'],
REFERENCE_TYPES[options['reference_type']])
ref_type = REFERENCE_TYPES[options['reference_type']]
else:
print_exit(['Reference Type: %s Not Allowed.' % options['reference_type'],
'Options: %s' % ', '.join(REFERENCE_TYPES.keys())])
if 'db_title' in options:
print 'Setting Database Title to: %s' % options['db_title']
db_title = options['db_title']
else:
db_title = 'BLAST_DB'
#Prepare Blast Database
db_command_list = options['blast_db_command_list'][:]
db_command_list += ['-in', full_reference_file, '-dbtype', ref_type, '-title', db_title,
'-out', db_title]
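    # With the default settings this assembles a command roughly like (the
    # reference file name is illustrative):
    #   makeblastdb -in reference.fasta -dbtype prot -title BLAST_DB -out BLAST_DB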
db_command = ' '.join(db_command_list)
if options['write_command']:
command_file = os.path.join(options['working_directory'],
JOB_TYPE + '.auto.make_db.sh')
util.write_file(command_file, '#!/bin/sh\n' + db_command)
print ''
print 'Running Command:\n ' + db_command
sys.stdout.flush()
try:
process = subprocess.Popen(db_command_list, stdout=sys.stdout, stderr=sys.stderr,
cwd=options['working_directory'])
if options['write_pid']:
pid_file_name = os.path.join(options['working_directory'],
options['job_type'] + '.auto.pid')
util.write_file(pid_file_name, str(process.pid))
process.wait()
if options['write_pid']:
util.delete_pid_file(pid_file_name)
sys.stdout.flush()
except KeyboardInterrupt:
if __name__ != '__main__' and options['is_pipe']:
sys.stdout, sys.stderr = terminal_out, terminal_error
out_file_stream.close()
print 'Killing %s Process.' % JOB_TYPE
process.kill()
raise
print ''
print 'Looking For Maximum Number of Annotations: %s' % options['max_matches']
#Prepare BLAST Sequence Comparison Command
command_list = list(options['blast_command_list'])
command_list += ['-db', db_title, '-query', full_input_file, '-outfmt', '7', '-evalue',
options['blast_evalue'], '-num_threads', options['max_CPU'],
'-max_target_seqs', str(options['max_matches']), '-out',
options['blast_result_file']]
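    # With the defaults above this assembles a command roughly like (the query
    # file name is illustrative):
    #   blastx -db BLAST_DB -query transcripts.fasta -outfmt 7 -evalue 1e-5 \
    #          -num_threads 4 -max_target_seqs 5 -out blast.out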
command = ' '.join(command_list)
#If Selected, Write Command to File
if options['write_command']:
command_file = os.path.join(options['working_directory'], JOB_TYPE + '.auto.blastx.sh')
util.write_file(command_file, '#!/bin/sh\n' + command)
#Perform BLAST Sequence Comparisons
print ''
print 'Running Command:\n ' + command
sys.stdout.flush()
try:
process = subprocess.Popen(command_list, stdout=sys.stdout, stderr=sys.stderr,
cwd=options['working_directory'])
if options['write_pid']:
pid_file_name = os.path.join(options['working_directory'],
options['job_type'] + '.auto.pid')
util.write_file(pid_file_name, str(process.pid))
process.wait()
if options['write_pid']:
util.delete_pid_file(pid_file_name)
sys.stdout.flush()
except KeyboardInterrupt:
if __name__ != '__main__' and options['is_pipe']:
sys.stdout, sys.stderr = terminal_out, terminal_error
out_file_stream.close()
print 'Killing %s Process.' % JOB_TYPE
process.kill()
raise
print ''
print 'Blast Completed with Out File: %s' % options['blast_result_file']
print ''
analyze(options)
print ''
print 'Annotation Complete'
if __name__ != '__main__' and options['is_pipe']:
sys.stdout, sys.stderr = terminal_out, terminal_error
out_file_stream.close()
#Analyze Results of Sequence Comparison
def analyze(options):
analysis = print_return(['Performing Annotation Analysis on BLAST Result.', ''])
#Ensure Required Settings in Options
for required_option in REQUIRED_ANALYSIS_SETTINGS:
if required_option not in options:
print_exit('Required Option: %s for %s analysis not given.' % (required_option,
JOB_TYPE))
#Ensure Working Directory Exists
if not os.path.isdir(options['working_directory']):
print_exit('Working Directory: %s Not Found.' % options['working_directory'])
#Ensure A Type of Query Sequence File is Given
if not any(x in options for x in ['absolute_input_analysis_file',
'rel_input_analysis_file',
'input_analysis_file',
'result_name_file']):
print_exit('Either input_analysis_file, absolute_input_analysis_file,'
                   + ' rel_input_analysis_file, or result_name_file parameter required.')
#Ensure A Type of Reference Sequence File is Given
if not any(x in options for x in ['absolute_input_reference_file',
'rel_input_reference_file',
                                      'input_reference_file',
                                      'reference_name_file']):
print_exit('Either input_reference_file, absolute_input_reference_file,'
                   + ' rel_input_reference_file, or reference_name_file parameter required.')
#Ensure Working Directory Exists
if not os.path.isdir(options['working_directory']):
print_exit('Working Directory: %s Not Found.' % options['working_directory'])
#Assign Correct Input File Name
if 'input_analysis_file' in options:
if os.path.isabs(options['input_analysis_file']):
full_input_file = options['input_analysis_file']
else:
full_input_file = os.path.join(options['working_directory'],
options['input_analysis_file'])
elif 'absolute_input_analysis_file' in options:
full_input_file = options['absolute_input_analysis_file']
elif 'rel_input_analysis_file' in options:
full_input_file = os.path.join(options['project_directory'],
options['rel_input_analysis_file'])
elif 'result_name_file' in options:
full_result_name_file = os.path.join(options['project_directory'],
options['result_name_file'])
if not os.path.isfile(full_result_name_file):
print_exit('Provided File: %s Containing' % full_result_name_file
+ ' Result Sequence File Name Not Found.')
rf = open(full_result_name_file, 'r')
full_input_file = rf.read().strip()
rf.close()
if not os.path.isfile(full_input_file):
print_exit('Cannot Find Read Result Sequence File: %s' % full_input_file)
input_file = os.path.basename(full_input_file)
#Assign Correct Reference File Name
if 'input_reference_file' in options:
if os.path.isabs(options['input_reference_file']):
full_reference_file = options['input_reference_file']
else:
full_reference_file = os.path.join(options['working_directory'],
                                               options['input_reference_file'])
elif 'absolute_input_reference_file' in options:
full_reference_file = options['absolute_input_reference_file']
elif 'rel_input_reference_file' in options:
full_reference_file = os.path.join(options['project_directory'],
options['rel_input_reference_file'])
elif 'reference_name_file' in options:
full_reference_name_file = os.path.join(options['project_directory'],
options['reference_name_file'])
if not os.path.isfile(full_reference_name_file):
print_exit('Provided File: %s Containing' % full_reference_name_file
+ ' Reference Sequence File Name Not Found.')
        rf = open(full_reference_name_file, 'r')
full_reference_file = rf.read().strip()
rf.close()
if not os.path.isfile(full_reference_file):
print_exit('Cannot Find Read Reference Sequence File: %s' % full_reference_file)
reference_file = os.path.basename(full_reference_file)
#Check that Input File Exists
if not os.path.isfile(full_input_file):
print_exit('Input Sequence File: %s Not Found.' % full_input_file)
#Check that Reference File Exists
if not os.path.isfile(full_reference_file):
print_exit('Input Reference Sequence File: %s Not Found.' % full_reference_file)
blast_file = options['blast_result_file']
full_blast_file = os.path.join(options['working_directory'], blast_file)
#Check that Blast File Exists
if not os.path.isfile(full_blast_file):
        print_exit('BLAST Result File: %s Not Found.' % full_blast_file)
if 'name_map_file' in options and options['name_map_file']:
if os.path.isabs(options['name_map_file']):
full_name_map_file = options['name_map_file']
name_map_file = os.path.basename(full_name_map_file)
else:
name_map_file = options['name_map_file']
full_name_map_file = os.path.join(options['working_directory'], name_map_file)
analysis += print_return(['Reading Name Map File:', ' %s' % full_name_map_file])
else:
full_name_map_file = None
name_map_file = None
#If Provided, Check that Name Map File Exists
if name_map_file:
        if not os.path.isfile(full_name_map_file):
print_exit('Provided Name Mapping File: %s Not Found.' % full_name_map_file)
db = Annotation_Database()
analysis += print_return(['Beginning Annotation...', ''])
#Read # of Sequences in Input File
input_sequence_count = util.count_FASTA_all(full_input_file)
analysis += print_return(['Total Sequences in input file %s:' % full_input_file
+ ' %i ' % input_sequence_count, ''])
#Read # of Lines in File
with open(full_blast_file) as blast_file_object:
for total_lines, line in enumerate(blast_file_object, start=1):
pass
analysis += print_return(['Total Lines in file %s: %s ' % (full_blast_file, str(total_lines)),
''])
#Read Blast File Outputs and Count Genes Found Over Threshold
blast_file_object = open(full_blast_file, 'r')
print_counter = 0
NUM_PRINTS = 1000
print_counter_threshold = total_lines/NUM_PRINTS
db_len = 0
for (line_number, line) in enumerate(blast_file_object, start=1):
print_counter += 1
if line.startswith('#'):
continue
split_line = line.split()
if not split_line:
print_exit('Blank Line Found in Blast Results File at Line Number %i' % line_number)
elif len(split_line) < 11:
print_exit([('Problem with formatting of line number %i ' % line_number
                         + 'in blast results file: %s' % full_blast_file), 'Line:', line.strip()])
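        # Tabular BLAST output (-outfmt 7) lists, per hit: query id, subject
        # id, % identity, alignment length, mismatches, gap opens, query
        # start/end, subject start/end, e-value, bit score -- hence the query
        # name in column 0, the match name in column 1 and the e-value in
        # column 10 below.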
query_sequence = split_line[0]
match_sequence = split_line[1]
e_score_string = split_line[10]
e_score = float(e_score_string)
record = Annotation_Record(ID=match_sequence, eVal=e_score, fileName=reference_file,
annotation=None)
db.add_record(query_sequence, record)
if print_counter >= print_counter_threshold:
db_len = len(db)
print ('\r%s Matched Sequences Found,' % (str(db_len))
+ ' %s complete. ' % util.percent_string(line_number, total_lines)),
sys.stdout.flush()
print_counter = (print_counter_threshold - print_counter)
print '\r' + ' ' * 79 + '\r',
analysis += print_return('%s Matched Sequences Found.' % (str(db_len)))
blast_file_object.close()
if options['write_all_matches']:
analysis += print_return('Writing All Sequence Matches to File: ' +
MATCH_PREFIX + '.All.annDB')
sys.stdout.flush()
db.write(MATCH_PREFIX + '.All.annDB')
annotation_count, record_count = db.count()
analysis += print_return(['%i Sequences ' % annotation_count
+ ' with %i Total Matches Written.' % record_count, ''])
db.cull(subset='best')
if options['write_best_matches']:
analysis += print_return('Writing Best Sequence Matches to File: ' +
MATCH_PREFIX + '.Best.annDB')
sys.stdout.flush()
db.write(MATCH_PREFIX + '.Best.annDB')
annotation_count, record_count = db.count()
analysis += print_return(['%i Sequences ' % annotation_count
+ ' with %i Total Matches Written.' % record_count, ''])
if name_map_file:
input_sequence_count = 0
        analysis += print_return('Reading Provided Name Map: %s' % full_name_map_file)
        name_map = Name_Map(full_name_map_file)
analysis += print_return('Remapping Sequence Names to Name Map...')
db.map_names(name_map, debug=False)
annotation_count, record_count = db.count()
analysis += print_return(['%i Sequences Remapped ' % annotation_count
+ 'with %i Matches.'% record_count, ''])
if options['write_best_matches']:
analysis += print_return('Writing Best Matches to File: ' +
MATCH_PREFIX + '.Remapped.Best.annDB')
sys.stdout.flush()
db.write(MATCH_PREFIX + '.Remapped.Best.annDB')
annotation_count, record_count = db.count()
analysis += print_return(['%i Sequences ' % annotation_count
+ ' with %i Total Matches Written.' % record_count, ''])
analysis += print_return('Reading Annotation Strings from Reference File: %s' % full_reference_file)
annotation_map = Annotation_Map(full_reference_file)
analysis += print_return('Mapping Annotations to Matches...')
map_counts = db.map_annotations(annotation_map)
analysis += print_return(['%i Total Annotations Mapped.' % map_counts, ''])
analysis += print_return('Writing All Annotations to File: ' +
ANNOTATION_PREFIX + '.All.annDB')
sys.stdout.flush()
db.write(ANNOTATION_PREFIX + '.All.annDB')
annotation_count, record_count = db.count()
analysis += print_return(['%i Sequences ' % annotation_count
+ ' with %i Total Matches Written.' % record_count, ''])
thresholds = options['evalue_cutoffs']
last_threshold = 10
for threshold in thresholds:
if float(threshold) >= float(last_threshold):
print_exit(['Thresholds: %s ' %', '.join(thresholds)
+'Must Be in Descending Order.',
'(1e-10, then 1e-20, then 1e-40, etc.)'])
else:
last_threshold = threshold
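    # Descending order matters here: the annotation database is culled in
    # place once per threshold in the loop below, so each pass can only keep
    # records that also satisfy the next, stricter e-value cutoff.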
analysis += print_return(['Analyzing Annotations for Evalue Thresholds: '
+ ', '.join(options['evalue_cutoffs']), ''])
report_dicts = []
formatted_reports = []
for threshold in thresholds:
(ini_seqs, ini_records, final_seqs, final_records) = db.cull(threshold=threshold)
        analysis += print_return('Writing Threshold %s Annotations to File: ' % threshold +
ANNOTATION_PREFIX + '.' + threshold + '.annDB')
sys.stdout.flush()
db.write(ANNOTATION_PREFIX + '.' + threshold + '.annDB')
annotation_count, record_count = db.count()
analysis += print_return(['%i Sequences' % annotation_count
+ ' with %i Total Matches Written.' % record_count, ''])
if input_sequence_count:
formatted_input_sequence_count = str(input_sequence_count)
percent = util.percent_string(final_seqs, input_sequence_count)
else:
formatted_input_sequence_count = '-'
percent = 'N/A%'
headers = ['Analys.', 'Cutoff', 'TotSqs.', 'AnnSqs.', 'Percent', 'TotAnn.', 'Remapd.']
data_grid = ['Annot.', threshold, formatted_input_sequence_count, final_seqs,
percent, final_records, bool(name_map_file)]
formatted_data = [str(x) for x in data_grid]
report_dict = dict(zip(headers, formatted_data))
report_dict['report_type'] = 'annotation'
report_dicts.append(report_dict)
formatted_reports.append(formatted_data)
analysis += print_return(['', 'Tab Separated Output:', '\t'.join(headers)])
for formatted_report in formatted_reports:
analysis += print_return('\t'.join(formatted_report))
analysis += print_return('')
#If Selected, Write Analysis Report
if options['write_report']:
report_file = os.path.join(options['working_directory'],
JOB_TYPE + '.report')
if len(report_dicts) > 1:
aux_reports = report_dicts[1:]
else:
aux_reports = []
util.write_report(report_file, report_dict, aux_reports=aux_reports)
return analysis
|
FSUgenomics/TFLOW
|
tflow/segments/Prototype_Find_Annotations.py
|
Python
|
gpl-2.0
| 38,150
|
[
"BLAST"
] |
9e90862ffba083a41f11a0eedb61a86e1f6da0922b5997892fa7e42a6cc7e0ea
|
# -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
#import warnings
class warnings:None
warnings.warn=lambda x:1
import warnings
import numpy as np
from spectral_embedding_ import spectral_embedding
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.utils import check_random_state, as_float_array
from sklearn.utils.validation import check_array
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.neighbors import kneighbors_graph, NearestNeighbors
from sklearn.cluster import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state : int, RandomState instance or None (default)
Determines random number generation for rotation matrix initialization.
Use an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
https://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
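            # t_svd is the (n_components x n_components) correlation between
            # the current discrete partition and the embedding; its SVD gives
            # the orthogonal Procrustes solution applied below
            # (rotation = Vh.T dot U.T) to best align the embedding with that
            # partition.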
try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
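# A minimal sketch of how discretize() can be used directly, assuming a
# precomputed spectral embedding `maps` of shape (n_samples, n_clusters)
# (the variable names here are illustrative, not part of this module):
#
#     maps = spectral_embedding(affinity, n_components=3, drop_first=False)
#     labels = discretize(maps, random_state=0)
#
# Inside this module the same call is made by spectral_clustering() when
# assign_labels='discretize'.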
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection of the normalized Laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance, when clusters are
nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int, RandomState instance or None (default)
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when eigen_solver == 'amg' and by
the K-Means initialization. Use an int to make the randomness
deterministic.
See :term:`Glossary <random_state>`.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
https://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
# The first eigen vector is constant only for fully connected graphs
# and should be kept for spectral clustering (drop_first = False)
# See spectral_embedding documentation.
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
class SpectralClustering(ClusterMixin, BaseEstimator):
"""Apply clustering to a projection of the normalized Laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either a
    kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
----------
n_clusters : integer, optional
The dimension of the projection subspace.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities.
n_components : integer, optional, default=n_clusters
Number of eigen vectors to use for the spectral embedding
random_state : int, RandomState instance or None (default)
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when ``eigen_solver='amg'`` and by
the K-Means initialization. Use an int to make the randomness
deterministic.
See :term:`Glossary <random_state>`.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
gamma : float, default=1.0
Kernel coefficient for rbf, poly, sigmoid, laplacian and chi2 kernels.
Ignored for ``affinity='nearest_neighbors'``.
affinity : string or callable, default 'rbf'
How to construct the affinity matrix.
- 'nearest_neighbors' : construct the affinity matrix by computing a
graph of nearest neighbors.
- 'rbf' : construct the affinity matrix using a radial basis function
(RBF) kernel.
- 'precomputed' : interpret ``X`` as a precomputed affinity matrix.
- 'precomputed_nearest_neighbors' : interpret ``X`` as a sparse graph
of precomputed nearest neighbors, and constructs the affinity matrix
by selecting the ``n_neighbors`` nearest neighbors.
- one of the kernels supported by
:func:`~sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when ``eigen_solver='arpack'``.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
n_jobs : int or None, optional (default=None)
The number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
        ``fit``.
labels_ : array, shape (n_samples,)
Labels of each point
Examples
--------
>>> from sklearn.cluster import SpectralClustering
>>> import numpy as np
>>> X = np.array([[1, 1], [2, 1], [1, 0],
... [4, 7], [3, 5], [3, 6]])
>>> clustering = SpectralClustering(n_clusters=2,
... assign_labels="discretize",
... random_state=0).fit(X)
>>> clustering.labels_
array([1, 1, 1, 0, 0, 0])
>>> clustering
SpectralClustering(assign_labels='discretize', n_clusters=2,
random_state=0)
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values means
very dissimilar elements, it can be transformed in a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- dist_matrix ** 2 / (2. * delta ** 2))
Where ``delta`` is a free parameter representing the width of the Gaussian
kernel.
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
https://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, n_components=None,
random_state=None, n_init=10, gamma=1., affinity='rbf',
n_neighbors=10, eigen_tol=0.0, assign_labels='kmeans',
degree=3, coef0=1, kernel_params=None, n_jobs=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.n_components = n_components
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform spectral clustering from features, or affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features), or \
array-like, shape (n_samples, n_samples)
Training instances to cluster, or similarities / affinities between
instances if ``affinity='precomputed'``. If a sparse matrix is
provided in a format other than ``csr_matrix``, ``csc_matrix``,
or ``coo_matrix``, it will be converted into a sparse
``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64, ensure_min_samples=2)
allow_squared = self.affinity in ["precomputed",
"precomputed_nearest_neighbors"]
if X.shape[0] == X.shape[1] and not allow_squared:
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors,
include_self=True,
n_jobs=self.n_jobs)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed_nearest_neighbors':
estimator = NearestNeighbors(n_neighbors=self.n_neighbors,
n_jobs=self.n_jobs,
metric="precomputed").fit(X)
connectivity = estimator.kneighbors_graph(X=X, mode='connectivity')
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
n_components=self.n_components,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
def fit_predict(self, X, y=None):
"""Perform spectral clustering from features, or affinity matrix,
and return cluster labels.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features), or \
array-like, shape (n_samples, n_samples)
Training instances to cluster, or similarities / affinities between
instances if ``affinity='precomputed'``. If a sparse matrix is
provided in a format other than ``csr_matrix``, ``csc_matrix``,
or ``coo_matrix``, it will be converted into a sparse
``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
labels : ndarray, shape (n_samples,)
Cluster labels.
"""
return super().fit_predict(X, y)
@property
def _pairwise(self):
return self.affinity in ["precomputed",
"precomputed_nearest_neighbors"]
|
wolfiex/DSMACC-testing
|
dsmacc/examples/_spectral.py
|
Python
|
gpl-3.0
| 22,357
|
[
"Brian",
"Gaussian"
] |
c131f1b9fa864aa018ea78f3bc7d85efff47adbefab85d58867e6a72c07e527b
|
"""
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import warnings
import numpy as np
from scipy import linalg
from ..preprocessing import MultiLabelBinarizer
from ..utils import array2d, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
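# Hedged usage sketch (added for illustration; not part of the original API):
# a minimal call to make_classification checking only the returned shapes,
# which follow directly from the chosen parameters. The helper name is
# arbitrary.
def _demo_make_classification():
    X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
                               n_redundant=2, n_classes=3,
                               n_clusters_per_class=1, random_state=0)
    assert X.shape == (100, 20)
    assert y.shape == (100,)
    return X, y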
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
return_indicator=False, random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. Number of labels follows
a Poisson distribution that never takes the value 0.
length : int, optional (default=50)
Sum of the features (number of words if documents).
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
return_indicator : bool, optional (default=False),
If ``True``, return ``Y`` in the binary indicator format, else
return a tuple of lists of labels.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : tuple of lists or array of shape [n_samples, n_classes]
The label sets.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
n = n_classes + 1
while (not allow_unlabeled and n == 0) or n > n_classes:
n = generator.poisson(n_labels)
# pick n classes
y = []
while len(y) != n:
# pick a class with probability P(c)
c = generator.multinomial(1, p_c).argmax()
            if c not in y:
y.append(c)
# pick a non-zero document length by rejection sampling
k = 0
while k == 0:
k = generator.poisson(length)
# generate a document of length k words
x = np.zeros(n_features, dtype=int)
for i in range(k):
if len(y) == 0:
# if sample does not belong to any class, generate noise word
w = generator.randint(n_features)
else:
# pick a class and generate an appropriate word
c = y[generator.randint(len(y))]
w = generator.multinomial(1, p_w_c[:, c]).argmax()
x[w] += 1
return x, y
X, Y = zip(*[sample_example() for i in range(n_samples)])
if return_indicator:
lb = MultiLabelBinarizer()
Y = lb.fit([range(n_classes)]).transform(Y)
else:
warnings.warn('Support for the sequence of sequences multilabel '
'representation is being deprecated and replaced with '
'a sparse indicator matrix. '
'return_indicator will default to True from version '
'0.17.',
DeprecationWarning)
return np.array(X, dtype=np.float64), Y
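# Hedged usage sketch (added for illustration; not part of the original API):
# requesting the binary indicator format so that ``Y`` has one column per
# class. The helper name is arbitrary.
def _demo_make_multilabel_classification():
    X, Y = make_multilabel_classification(n_samples=25, n_features=20,
                                          n_classes=5, n_labels=2,
                                          return_indicator=True,
                                          random_state=0)
    assert X.shape == (25, 20)
    assert Y.shape == (25, 5)
    return X, Y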
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
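# Hedged usage sketch (added for illustration; not part of the original API):
# labels are +1 or -1 depending on whether the squared norm of each row
# exceeds 9.34, per the formula in the docstring. The helper name is
# arbitrary.
def _demo_make_hastie_10_2():
    X, y = make_hastie_10_2(n_samples=200, random_state=0)
    assert X.shape == (200, 10)
    assert set(np.unique(y)).issubset(set([-1.0, 1.0]))
    return X, y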
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See the `make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
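# Hedged usage sketch (added for illustration; not part of the original API):
# with ``coef=True`` and ``noise=0.0`` the targets satisfy
# ``y = np.dot(X, coef) + bias`` exactly, as described in the docstring.
# The helper name is arbitrary.
def _demo_make_regression():
    X, y, coef = make_regression(n_samples=50, n_features=10, n_informative=3,
                                 bias=2.0, noise=0.0, coef=True,
                                 random_state=0)
    assert np.allclose(y, np.dot(X, coef) + 2.0)
    return X, y, coef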
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples / 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
    if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples / 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_in, dtype=np.intp),
np.ones(n_samples_out, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
    if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
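# Hedged usage sketch (added for illustration; not part of the original API):
# both toy generators return 2-D points with binary labels; the shapes below
# follow from the definitions above. The helper name is arbitrary.
def _demo_make_circles_and_moons():
    X_c, y_c = make_circles(n_samples=100, noise=0.05, factor=0.5,
                            random_state=0)
    X_m, y_m = make_moons(n_samples=100, noise=0.05, random_state=0)
    assert X_c.shape == X_m.shape == (100, 2)
    assert set(np.unique(y_c)) == set(np.unique(y_m)) == set([0, 1])
    return (X_c, y_c), (X_m, y_m)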
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = array2d(centers)
n_features = centers.shape[1]
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, n in enumerate(n_samples_per_center):
X.append(centers[i] + generator.normal(scale=cluster_std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profiles is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such as D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
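# Hedged usage sketch (added for illustration; not part of the original API):
# verifying the identity ``Y = np.dot(D, X)`` and the per-column sparsity
# promised in the docstring. The helper name is arbitrary.
def _demo_make_sparse_coded_signal():
    Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
                                       n_features=6, n_nonzero_coefs=3,
                                       random_state=0)
    assert np.allclose(Y, np.dot(D, X))
    assert all((X[:, i] != 0).sum() == 3 for i in range(X.shape[1]))
    return Y, D, X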
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
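# Hedged usage sketch (added for illustration; not part of the original API):
# the returned matrix should be symmetric with strictly positive eigenvalues.
# The helper name is arbitrary.
def _demo_make_spd_matrix():
    M = make_spd_matrix(n_dim=4, random_state=0)
    assert np.allclose(M, M.T)
    assert np.all(linalg.eigvalsh(M) > 0)
    return M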
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Parameters
----------
dim: integer, optional (default=1)
        The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
prec: array of shape = [dim, dim]
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
d = np.diag(prec)
d = 1. / np.sqrt(d)
prec *= d
prec *= d[:, np.newaxis]
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
return result, rows, cols
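# Hedged usage sketch (added for illustration; not part of the original API):
# the bicluster generators return the data matrix plus boolean row/column
# indicator arrays whose shapes follow from the docstrings. The helper name
# is arbitrary.
def _demo_make_biclusters():
    data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=4,
                                       noise=0.5, shuffle=True,
                                       random_state=0)
    assert data.shape == (30, 20)
    assert rows.shape == (4, 30) and cols.shape == (4, 20)
    return data, rows, cols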
|
chaluemwut/fbserver
|
venv/lib/python2.7/site-packages/sklearn/datasets/samples_generator.py
|
Python
|
apache-2.0
| 52,427
|
[
"Gaussian"
] |
49445f2caefe5611b22fa877921dbe30198fe3d09a6697dbd69cd9f3a49a9216
|
#!/usr/bin/env python
import matplotlib
matplotlib.rcParams['legend.fancybox'] = True
import matplotlib.pyplot as plt
import numpy as np
#define some data
x = [32, 5032, 10032, 15032, 20032]
pc_mean = [0.946, 3.014, 3.903, 4.228, 4.539]
s3mini_mean = [2.764, 59.412, 30.321, 86.975, 183.882]
s3_mean = [1.649, 36.336, 16.471, 45.387, 97.151]
nexus10_mean = [5.147, 14.171, 9.024, 17.944, 32.070]
#error data
pc_error = [0.017, 0.034, 0.052, 0.036, 0.042]
s3mini_error = [0.127, 0.708, 0.347, 1.108, 2.101]
s3_error = [0.076, 0.668, 0.288, 0.729, 1.208]
nexus10_error = [0.205, 0.525, 0.291, 0.496, 0.588]
#plt.subplot(121)
#plot data
p1, = plt.plot(x, pc_mean, linestyle="dashed", marker="^", color="red", label="Mean for a PC")
p2, = plt.plot(x, s3mini_mean, linestyle="-", marker="^", color="blue", label="Mean for a Samsung Galaxy SIII Mini")
p3, = plt.plot(x, s3_mean, linestyle=":", marker="^", color="green", label="Mean for a Samsung Galaxy SIII")
p4, = plt.plot(x, nexus10_mean, linestyle="-", marker="^", color="orange", label="Mean for a Nexus 10")
#legend
lgd = plt.legend([p1, p2, p3, p4], ['Mean for a PC', 'Mean for a Samsung Galaxy SIII Mini', 'Mean for a Samsung Galaxy SIII', 'Mean for a Nexus 10'], bbox_to_anchor=[0.5, -0.1],
loc='upper center', ncol=1, borderaxespad=0.25)
#plot only errorbars
plt.errorbar(x, pc_mean, yerr=pc_error, linestyle="None", marker="None", color="green")
plt.errorbar(x, s3mini_mean, yerr=s3mini_error, linestyle="None", marker="None", color="green")
plt.errorbar(x, s3_mean, yerr=s3_error, linestyle="None", marker="None", color="green")
plt.errorbar(x, nexus10_mean, yerr=nexus10_error, linestyle="None", marker="None", color="green")
#configure X axes
plt.xlim(32, 20032)
plt.xticks([32, 5032, 10032, 15032, 20032])
#configure Y axes (the slowest device peaks near 184 s, so the limit must
#cover the full tick range rather than the original 0-5 s window)
plt.ylim(0.0, 200.0)
plt.yticks([0.0, 50, 100, 150, 200])
#labels
plt.xlabel('\n' + 'ABox axioms')
plt.ylabel('Time in seconds' + '\n')
#title
plt.title('Increasing the number of ABox axioms using Pellet for Java in a PC' + '\n')
#show plot with data
for t, a in zip(x, pc_mean):
plt.plot([t], [a], 'b',)
plt.annotate(round(a, 3), xy=(t, a), xytext=(t + 0.01, a + 0.01), color='black')
for t, a in zip(x, s3mini_mean):
plt.plot([t], [a], 'b',)
plt.annotate(round(a, 3), xy=(t, a), xytext=(t + 0.01, a + 0.01), color='black')
for t, a in zip(x, s3_mean):
plt.plot([t], [a], 'b',)
plt.annotate(round(a, 3), xy=(t, a), xytext=(t + 0.01, a + 0.01), color='black')
for t, a in zip(x, nexus10_mean):
plt.plot([t], [a], 'b',)
plt.annotate(round(a, 3), xy=(t, a), xytext=(t + 0.01, a + 0.01), color='black')
# Pad margins so that markers don't get clipped by the axes
plt.margins(0.2)
plt.subplots_adjust(bottom=0.15)
#pm = __import__('text_positions')
#print(dir(pm)) # just for fun :)
#save plot
plt.savefig('pellet_abox.pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.show()
|
edlectrico/dissertation
|
graphics/pellet_abox.py
|
Python
|
apache-2.0
| 2,927
|
[
"Galaxy"
] |
79d10999b5efdab7c084a92bb802827ecab5d0b482f1353ce6190b2ebb8533d9
|
#
# (c) 2015 Brian Coca, <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
This module adds shared support for generic api modules
In order to use this module, include it as part of a custom
module as shown below.
** Note: The order of the import statements does matter. **
from ansible.module_utils.basic import *
from ansible.module_utils.api import *
The 'api' module provides the following common argument specs:
* rate limit spec
- rate: number of requests per time unit (int)
- rate_limit: time window in which the limit is applied in seconds
* retry spec
- retries: number of attempts
- retry_pause: delay between attempts in seconds
"""
import time
def rate_limit_argument_spec(spec=None):
"""Creates an argument spec for working with rate limiting"""
arg_spec = (dict(
rate=dict(type='int'),
rate_limit=dict(type='int'),
))
if spec:
arg_spec.update(spec)
return arg_spec
def retry_argument_spec(spec=None):
"""Creates an argument spec for working with retrying"""
arg_spec = (dict(
retries=dict(type='int'),
retry_pause=dict(type='float', default=1),
))
if spec:
arg_spec.update(spec)
return arg_spec
def basic_auth_argument_spec(spec=None):
arg_spec = (dict(
api_username=dict(type='str', required=False),
api_password=dict(type='str', required=False, no_log=True),
api_url=dict(type='str', required=False),
validate_certs=dict(type='bool', default=True)
))
if spec:
arg_spec.update(spec)
return arg_spec
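# --- Editor's illustrative sketch (not part of the original Ansible file) ---
# Shows how a custom module might merge the shared specs above with its own
# arguments; the 'name' option here is a hypothetical module-specific field.
def _example_combined_argument_spec():
    spec = dict(name=dict(type='str', required=True))
    spec = rate_limit_argument_spec(spec)
    spec = retry_argument_spec(spec)
    return spec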
def rate_limit(rate=None, rate_limit=None):
"""rate limiting decorator"""
minrate = None
if rate is not None and rate_limit is not None:
minrate = float(rate_limit) / float(rate)
def wrapper(f):
last = [0.0]
def ratelimited(*args,**kwargs):
if minrate is not None:
elapsed = time.clock() - last[0]
left = minrate - elapsed
if left > 0:
time.sleep(left)
last[0] = time.clock()
ret = f(*args,**kwargs)
return ret
return ratelimited
return wrapper
def retry(retries=None, retry_pause=1):
"""Retry decorator"""
def wrapper(f):
        def retried(*args,**kwargs):
            if retries is not None:
                # keep the attempt counter local to each call; rebinding a
                # closure variable from wrapper() would raise UnboundLocalError
                retry_count = 0
ret = None
while True:
retry_count += 1
if retry_count >= retries:
raise Exception("Retry limit exceeded: %d" % retries)
try:
ret = f(*args,**kwargs)
except:
pass
if ret:
break
time.sleep(retry_pause)
return ret
return retried
return wrapper
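# --- Editor's illustrative sketch (not part of the original Ansible file) ---
# Rough usage of the two decorators above; the function and its settings are
# hypothetical examples, not values recommended by Ansible.
@retry(retries=3, retry_pause=2)
@rate_limit(rate=10, rate_limit=60)
def _example_fetch_page():
    # a real module would perform an API call here and return its payload
    return {'status': 'ok'}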
|
camradal/ansible
|
lib/ansible/module_utils/api.py
|
Python
|
gpl-3.0
| 3,560
|
[
"Brian"
] |
7e1a9b7f07a5fc48206a923d35b6cfe5edc3c73035011bc040204e21af452e12
|
from collections import deque
import time
import requests
# Constants
BRAZIL = 'br'
EUROPE_NORDIC_EAST = 'eune'
EUROPE_WEST = 'euw'
KOREA = 'kr'
LATIN_AMERICA_NORTH = 'lan'
LATIN_AMERICA_SOUTH = 'las'
NORTH_AMERICA = 'na'
OCEANIA = 'oce'
RUSSIA = 'ru'
TURKEY = 'tr'
queue_types = [
'CUSTOM', # Custom games
'NORMAL_5x5_BLIND', # Normal 5v5 blind pick
'BOT_5x5', # Historical Summoners Rift coop vs AI games
'BOT_5x5_INTRO', # Summoners Rift Intro bots
'BOT_5x5_BEGINNER', # Summoner's Rift Coop vs AI Beginner Bot games
'BOT_5x5_INTERMEDIATE', # Historical Summoner's Rift Coop vs AI Intermediate Bot games
'NORMAL_3x3', # Normal 3v3 games
'NORMAL_5x5_DRAFT', # Normal 5v5 Draft Pick games
'ODIN_5x5_BLIND', # Dominion 5v5 Blind Pick games
'ODIN_5x5_DRAFT', # Dominion 5v5 Draft Pick games
'BOT_ODIN_5x5', # Dominion Coop vs AI games
'RANKED_SOLO_5x5', # Ranked Solo 5v5 games
'RANKED_PREMADE_3x3', # Ranked Premade 3v3 games
'RANKED_PREMADE_5x5', # Ranked Premade 5v5 games
'RANKED_TEAM_3x3', # Ranked Team 3v3 games
'RANKED_TEAM_5x5', # Ranked Team 5v5 games
'BOT_TT_3x3', # Twisted Treeline Coop vs AI games
'GROUP_FINDER_5x5', # Team Builder games
'ARAM_5x5', # ARAM games
'ONEFORALL_5x5', # One for All games
'FIRSTBLOOD_1x1', # Snowdown Showdown 1v1 games
'FIRSTBLOOD_2x2', # Snowdown Showdown 2v2 games
'SR_6x6', # Hexakill games
'URF_5x5', # Ultra Rapid Fire games
'BOT_URF_5x5', # Ultra Rapid Fire games played against AI games
'NIGHTMARE_BOT_5x5_RANK1', # Doom Bots Rank 1 games
'NIGHTMARE_BOT_5x5_RANK2', # Doom Bots Rank 2 games
'NIGHTMARE_BOT_5x5_RANK5', # Doom Bots Rank 5 games
]
game_maps = [
{'map_id': 1, 'name': "Summoner's Rift", 'notes': "Summer Variant"},
{'map_id': 2, 'name': "Summoner's Rift", 'notes': "Autumn Variant"},
{'map_id': 3, 'name': "The Proving Grounds", 'notes': "Tutorial Map"},
{'map_id': 4, 'name': "Twisted Treeline", 'notes': "Original Version"},
{'map_id': 8, 'name': "The Crystal Scar", 'notes': "Dominion Map"},
{'map_id': 10, 'name': "Twisted Treeline", 'notes': "Current Version"},
{'map_id': 12, 'name': "Howling Abyss", 'notes': "ARAM Map"},
]
game_modes = [
'CLASSIC', # Classic Summoner's Rift and Twisted Treeline games
'ODIN', # Dominion/Crystal Scar games
'ARAM', # ARAM games
'TUTORIAL', # Tutorial games
'ONEFORALL', # One for All games
'FIRSTBLOOD', # Snowdown Showdown games
]
game_types = [
'CUSTOM_GAME', # Custom games
'TUTORIAL_GAME', # Tutorial games
'MATCHED_GAME', # All other games
]
sub_types = [
'NONE', # Custom games
'NORMAL', # Summoner's Rift unranked games
'NORMAL_3x3', # Twisted Treeline unranked games
'ODIN_UNRANKED', # Dominion/Crystal Scar games
'ARAM_UNRANKED_5v5', # ARAM / Howling Abyss games
'BOT', # Summoner's Rift and Crystal Scar games played against AI
'BOT_3x3', # Twisted Treeline games played against AI
'RANKED_SOLO_5x5', # Summoner's Rift ranked solo queue games
'RANKED_TEAM_3x3', # Twisted Treeline ranked team games
'RANKED_TEAM_5x5', # Summoner's Rift ranked team games
'ONEFORALL_5x5', # One for All games
'FIRSTBLOOD_1x1', # Snowdown Showdown 1x1 games
'FIRSTBLOOD_2x2', # Snowdown Showdown 2x2 games
'SR_6x6', # Hexakill games
'CAP_5x5', # Team Builder games
'URF', # Ultra Rapid Fire games
'URF_BOT', # Ultra Rapid Fire games against AI
'NIGHTMARE_BOT', # Nightmare bots
]
player_stat_summary_types = [
'Unranked', # Summoner's Rift unranked games
'Unranked3x3', # Twisted Treeline unranked games
'OdinUnranked', # Dominion/Crystal Scar games
'AramUnranked5x5', # ARAM / Howling Abyss games
'CoopVsAI', # Summoner's Rift and Crystal Scar games played against AI
'CoopVsAI3x3', # Twisted Treeline games played against AI
'RankedSolo5x5', # Summoner's Rift ranked solo queue games
'RankedTeams3x3', # Twisted Treeline ranked team games
'RankedTeams5x5', # Summoner's Rift ranked team games
'OneForAll5x5', # One for All games
'FirstBlood1x1', # Snowdown Showdown 1x1 games
'FirstBlood2x2', # Snowdown Showdown 2x2 games
'SummonersRift6x6', # Hexakill games
'CAP5x5', # Team Builder games
'URF', # Ultra Rapid Fire games
'URFBots', # Ultra Rapid Fire games played against AI
]
solo_queue, ranked_5s, ranked_3s = 'RANKED_SOLO_5x5', 'RANKED_TEAM_5x5', 'RANKED_TEAM_3x3'
api_versions = {
'champion': 1.2,
'game': 1.3,
'league': 2.5,
'lol-static-data': 1.2,
'match': 2.2,
'matchhistory': 2.2,
'stats': 1.3,
'summoner': 1.4,
'team': 2.4
}
class LoLException(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return self.error
error_400 = LoLException("Bad request")
error_401 = LoLException("Unauthorized")
error_404 = LoLException("Game data not found")
error_429 = LoLException("Too many requests")
error_500 = LoLException("Internal server error")
error_503 = LoLException("Service unavailable")
def raise_status(response):
if response.status_code == 400:
raise error_400
elif response.status_code == 401:
raise error_401
elif response.status_code == 404:
raise error_404
elif response.status_code == 429:
raise error_429
elif response.status_code == 500:
raise error_500
elif response.status_code == 503:
raise error_503
else:
response.raise_for_status()
class RateLimit:
def __init__(self, allowed_requests, seconds):
self.allowed_requests = allowed_requests
self.seconds = seconds
self.made_requests = deque()
def __reload(self):
t = time.time()
while len(self.made_requests) > 0 and self.made_requests[0] < t:
self.made_requests.popleft()
def add_request(self):
self.made_requests.append(time.time() + self.seconds)
def request_available(self):
self.__reload()
return len(self.made_requests) < self.allowed_requests
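# --- Editor's illustrative sketch (not part of the original library) ---
# The sliding-window limiter above permits a request only while fewer than
# 'allowed_requests' timestamps remain inside the current window.
def _example_rate_limit_window():
    lim = RateLimit(10, 10)          # at most 10 requests per 10 seconds
    if lim.request_available():
        lim.add_request()            # record the request we are about to make
    return lim.request_available()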
class RiotWatcher:
def __init__(self, key, default_region=NORTH_AMERICA, limits=(RateLimit(10, 10), RateLimit(500, 600), )):
self.key = key
self.default_region = default_region
self.limits = limits
def can_make_request(self):
for lim in self.limits:
if not lim.request_available():
return False
return True
def base_request(self, url, region, static=False, **kwargs):
if region is None:
region = self.default_region
args = {'api_key': self.key}
for k in kwargs:
if kwargs[k] is not None:
args[k] = kwargs[k]
r = requests.get(
'https://{proxy}.api.pvp.net/api/lol/{static}{region}/{url}'.format(
proxy='global' if static else region,
static='static-data/' if static else '',
region=region,
url=url
),
params=args
)
if not static:
for lim in self.limits:
lim.add_request()
raise_status(r)
return r.json()
# champion-v1.2
def _champion_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/champion/{end_url}'.format(
version=api_versions['champion'],
end_url=end_url
),
region,
**kwargs
)
def get_all_champions(self, region=None, free_to_play=False):
return self._champion_request('', region, freeToPlay=free_to_play)
def get_champion(self, champion_id, region=None):
return self._champion_request('{id}'.format(id=champion_id), region)
# game-v1.3
def _game_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/game/{end_url}'.format(
version=api_versions['game'],
end_url=end_url
),
region,
**kwargs
)
def get_recent_games(self, summoner_id, region=None):
return self._game_request('by-summoner/{summoner_id}/recent'.format(summoner_id=summoner_id), region)
# league-v2.5
def _league_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/league/{end_url}'.format(
version=api_versions['league'],
end_url=end_url
),
region,
**kwargs
)
def get_league(self, summoner_ids=None, team_ids=None, region=None):
"""summoner_ids and team_ids arguments must be iterable, only one should be specified, not both"""
if (summoner_ids is None) != (team_ids is None):
if summoner_ids is not None:
return self._league_request(
'by-summoner/{summoner_ids}'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
else:
return self._league_request(
'by-team/{team_ids}'.format(team_ids=','.join([str(t) for t in team_ids])),
region
)
def get_league_entry(self, summoner_ids=None, team_ids=None, region=None):
"""summoner_ids and team_ids arguments must be iterable, only one should be specified, not both"""
if (summoner_ids is None) != (team_ids is None):
if summoner_ids is not None:
return self._league_request(
'by-summoner/{summoner_ids}/entry'.format(
summoner_ids=','.join([str(s) for s in summoner_ids])
),
region
)
else:
return self._league_request(
'by-team/{team_ids}/entry'.format(team_ids=','.join([str(t) for t in team_ids])),
region
)
def get_challenger(self, region=None, queue=solo_queue):
return self._league_request('challenger', region, type=queue)
# lol-static-data-v1.2
def _static_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/{end_url}'.format(
version=api_versions['lol-static-data'],
end_url=end_url
),
region,
static=True,
**kwargs
)
def static_get_champion_list(self, region=None, locale=None, version=None, data_by_id=None, champ_data=None):
return self._static_request(
'champion',
region,
locale=locale,
version=version,
dataById=data_by_id,
champData=champ_data
)
def static_get_champion(self, champ_id, region=None, locale=None, version=None, champ_data=None):
return self._static_request(
'champion/{id}'.format(id=champ_id),
region,
locale=locale,
version=version,
champData=champ_data
)
def static_get_item_list(self, region=None, locale=None, version=None, item_list_data=None):
return self._static_request('item', region, locale=locale, version=version, itemListData=item_list_data)
def static_get_item(self, item_id, region=None, locale=None, version=None, item_data=None):
return self._static_request(
'item/{id}'.format(id=item_id),
region,
locale=locale,
version=version,
itemData=item_data
)
def static_get_mastery_list(self, region=None, locale=None, version=None, mastery_list_data=None):
return self._static_request(
'mastery',
region,
locale=locale,
version=version,
masteryListData=mastery_list_data
)
def static_get_mastery(self, mastery_id, region=None, locale=None, version=None, mastery_data=None):
return self._static_request(
'mastery/{id}'.format(id=mastery_id),
region,
locale=locale,
version=version,
masteryData=mastery_data
)
def static_get_realm(self, region=None):
return self._static_request('realm', region)
def static_get_rune_list(self, region=None, locale=None, version=None, rune_list_data=None):
return self._static_request('rune', region, locale=locale, version=version, runeListData=rune_list_data)
def static_get_rune(self, rune_id, region=None, locale=None, version=None, rune_data=None):
return self._static_request(
'rune/{id}'.format(id=rune_id),
region,
locale=locale,
version=version,
runeData=rune_data
)
def static_get_summoner_spell_list(self, region=None, locale=None, version=None, data_by_id=None, spell_data=None):
return self._static_request(
'summoner-spell',
region,
locale=locale,
version=version,
dataById=data_by_id,
spellData=spell_data
)
def static_get_summoner_spell(self, spell_id, region=None, locale=None, version=None, spell_data=None):
return self._static_request(
'summoner-spell/{id}'.format(id=spell_id),
region,
locale=locale,
version=version,
spellData=spell_data
)
def static_get_versions(self, region=None):
return self._static_request('versions', region)
# match-v2.2
def _match_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/match/{end_url}'.format(
version=api_versions['match'],
end_url=end_url
),
region,
**kwargs
)
def get_match(self, match_id, region=None, include_timeline=False):
return self._match_request(
'{match_id}'.format(match_id=match_id),
region,
includeTimeline=include_timeline
)
# matchhistory-v2.2
def _match_history_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/matchhistory/{end_url}'.format(
version=api_versions['matchhistory'],
end_url=end_url
),
region,
**kwargs
)
def get_match_history(self, summoner_id, region=None, champion_ids=None, ranked_queues=None, begin_index=None, end_index=None):
return self._match_history_request(
'{summoner_id}'.format(summoner_id=summoner_id),
region,
championIds=champion_ids,
rankedQueues=ranked_queues,
beginIndex=begin_index,
endIndex=end_index
)
# stats-v1.3
def _stats_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/stats/{end_url}'.format(
version=api_versions['stats'],
end_url=end_url
),
region,
**kwargs
)
def get_stat_summary(self, summoner_id, region=None, season=None):
return self._stats_request(
'by-summoner/{summoner_id}/summary'.format(summoner_id=summoner_id),
region,
season='SEASON{}'.format(season) if season is not None else None)
def get_ranked_stats(self, summoner_id, region=None, season=None):
return self._stats_request(
'by-summoner/{summoner_id}/ranked'.format(summoner_id=summoner_id),
region,
season='SEASON{}'.format(season) if season is not None else None
)
# summoner-v1.4
def _summoner_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/summoner/{end_url}'.format(
version=api_versions['summoner'],
end_url=end_url
),
region,
**kwargs
)
def get_mastery_pages(self, summoner_ids, region=None):
return self._summoner_request(
'{summoner_ids}/masteries'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
def get_rune_pages(self, summoner_ids, region=None):
return self._summoner_request(
'{summoner_ids}/runes'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
def get_summoners(self, names=None, ids=None, region=None):
if (names is None) != (ids is None):
return self._summoner_request(
'by-name/{summoner_names}'.format(summoner_names=','.join(names)) if names is not None
else '{summoner_ids}'.format(summoner_ids=','.join([str(i) for i in ids])),
region
)
else:
return None
def get_summoner(self, name=None, id=None, region=None):
if (name is None) != (id is None):
if name is not None:
return self.get_summoners(names=[name, ], region=region)[name]
else:
return self.get_summoners(ids=[id, ], region=region)[str(id)]
return None
def get_summoner_name(self, summoner_ids, region=None):
return self._summoner_request(
'{summoner_ids}/name'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
# team-v2.4
def _team_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/team/{end_url}'.format(
version=api_versions['team'],
end_url=end_url
),
region,
**kwargs
)
def get_teams_for_summoner(self, summoner_id, region=None):
return self.get_teams_for_summoners([summoner_id, ], region=region)[str(summoner_id)]
def get_teams_for_summoners(self, summoner_ids, region=None):
return self._team_request(
'by-summoner/{summoner_id}'.format(summoner_id=','.join([str(s) for s in summoner_ids])),
region
)
def get_team(self, team_id, region=None):
return self.get_teams([team_id, ], region=region)[str(team_id)]
def get_teams(self, team_ids, region=None):
return self._team_request('{team_ids}'.format(team_ids=','.join(str(t) for t in team_ids)), region)
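# --- Editor's illustrative sketch (not part of the original library) ---
# Typical use of the wrapper above; the API key and summoner name are
# placeholders, and this generation of Riot endpoints has since been retired.
def _example_lookup_summoner():
    watcher = RiotWatcher('your-api-key-here', default_region=NORTH_AMERICA)
    if watcher.can_make_request():
        return watcher.get_summoner(name='hypothetical-summoner')
    return None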
|
yilinhe/MOBA-TeamCompDecoding
|
lol-data-collection/riotwatcher.py
|
Python
|
mit
| 18,933
|
[
"CRYSTAL"
] |
a04f7096a341b9f80cd574ac8d4d48776732168e09bbcd5511ce64bfb0769a53
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import absolute_import
from __future__ import print_function
import re
import struct
from collections import defaultdict
from decimal import Decimal
from .pdict import PreservingDict
from .periodictable import *
from .physconst import *
from .exceptions import *
from .molecule import Molecule
from .orient import OrientMols
from .options import conv_float2negexp
def harvest_output(outtext):
"""Function to separate portions of a CFOUR output file *outtest*,
divided by xjoda.
"""
pass_psivar = []
pass_coord = []
pass_grad = []
    for outpass in re.split(r'--invoking executable xjoda', outtext, flags=re.MULTILINE):
psivar, c4coord, c4grad = harvest_outfile_pass(outpass)
pass_psivar.append(psivar)
pass_coord.append(c4coord)
pass_grad.append(c4grad)
#print '\n\nXXXXXXXXXXXXXXXXXXXXXXXXXX\n\n'
#print outpass
#print psivar, c4coord, c4grad
#print psivar, c4grad
#print '\n\nxxxxxxxxxxxxxxxxxxxxxxxxxx\n\n'
retindx = -1 if pass_coord[-1] else -2
# print ' <<< C4 PSIVAR >>>'
# for item in pass_psivar[retindx]:
# print(' %30s %16.8f' % (item, pass_psivar[retindx][item]))
# print ' <<< C4 COORD >>>'
# for item in pass_coord[retindx]:
# print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
# print ' <<< C4 GRAD >>>'
# for item in pass_grad[retindx]:
# print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
return pass_psivar[retindx], pass_coord[retindx], pass_grad[retindx]
def harvest_outfile_pass(outtext):
"""Function to read CFOUR output file *outtext* and parse important
    quantum chemical information from it.
"""
psivar = PreservingDict()
psivar_coord = None
psivar_grad = None
# TODO: BCC
# CI
# QCISD(T)
# other ROHF tests
# vcc/ecc
NUMBER = "((?:[-+]?\\d*\\.\\d+(?:[DdEe][-+]?\\d+)?)|(?:[-+]?\\d+\\.\\d*(?:[DdEe][-+]?\\d+)?))"
# Process NRE
mobj = re.search(r'^\s+' + r'(?:Nuclear repulsion energy :)' + r'\s+' + NUMBER + r'\s+a\.u\.\s*$',
outtext, re.MULTILINE)
if mobj:
print('matched nre')
psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(1)
# Process SCF
mobj = re.search(
r'^\s+' + r'(?:E\(SCF\))' + r'\s+=\s+' + NUMBER + r'\s+a\.u\.\s*$',
outtext, re.MULTILINE)
if mobj:
print('matched scf1')
psivar['SCF TOTAL ENERGY'] = mobj.group(1)
mobj = re.search(
r'^\s+' + r'(?:E\(SCF\)=)' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE)
if mobj:
print('matched scf2')
psivar['SCF TOTAL ENERGY'] = mobj.group(1)
mobj = re.search(
r'^\s+' + r'(?:SCF has converged.)' + r'\s*$' +
r'(?:.*?)' +
r'^\s+' + r'(?:\d+)' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched scf3')
psivar['SCF TOTAL ENERGY'] = mobj.group(1)
# Process MP2
mobj = re.search(
r'^\s+' + r'(?:E2\(AA\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:E2\(AB\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:E2\(TOT\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:Total MP2 energy)' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*$',
outtext, re.MULTILINE)
if mobj:
print('matched mp2r')
psivar['MP2 SAME-SPIN CORRELATION ENERGY'] = 2 * Decimal(mobj.group(1))
psivar['MP2 OPPOSITE-SPIN CORRELATION ENERGY'] = mobj.group(2)
psivar['MP2 CORRELATION ENERGY'] = 2 * Decimal(mobj.group(1)) + Decimal(mobj.group(2))
psivar['MP2 TOTAL ENERGY'] = mobj.group(4)
mobj = re.search(
r'^\s+' + r'(?:E2\(AA\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:E2\(BB\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:E2\(AB\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:E2\(TOT\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:Total MP2 energy)' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*$',
outtext, re.MULTILINE)
if mobj:
print('matched mp2u')
psivar['MP2 SAME-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(1)) + Decimal(mobj.group(2))
psivar['MP2 OPPOSITE-SPIN CORRELATION ENERGY'] = mobj.group(3)
psivar['MP2 CORRELATION ENERGY'] = Decimal(mobj.group(1)) + \
Decimal(mobj.group(2)) + Decimal(mobj.group(3))
psivar['MP2 TOTAL ENERGY'] = mobj.group(5)
mobj = re.search(
r'^\s+' + r'(?:E2\(AA\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:E2\(BB\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:E2\(AB\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:E2\(SINGLE\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:E2\(TOT\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'(?:Total MP2 energy)' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*$',
outtext, re.MULTILINE)
if mobj:
print('matched mp2ro')
psivar['MP2 SAME-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(1)) + Decimal(mobj.group(2))
psivar['MP2 OPPOSITE-SPIN CORRELATION ENERGY'] = mobj.group(3)
psivar['MP2 SINGLES ENERGY'] = mobj.group(4)
psivar['MP2 CORRELATION ENERGY'] = Decimal(mobj.group(1)) + \
Decimal(mobj.group(2)) + Decimal(mobj.group(3)) + Decimal(mobj.group(4))
psivar['MP2 TOTAL ENERGY'] = mobj.group(6)
# Process MP3
mobj = re.search(
r'^\s+' + r'(?:D-MBPT\(2\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:D-MBPT\(3\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched mp3r')
dmp2 = Decimal(mobj.group(1))
dmp3 = Decimal(mobj.group(3))
psivar['MP2 CORRELATION ENERGY'] = dmp2
psivar['MP2 TOTAL ENERGY'] = mobj.group(2)
psivar['MP3 CORRELATION ENERGY'] = dmp2 + dmp3
psivar['MP3 TOTAL ENERGY'] = mobj.group(4)
psivar['MP2.5 CORRELATION ENERGY'] = dmp2 + Decimal('0.500000000000') * dmp3
psivar['MP2.5 TOTAL ENERGY'] = psivar['MP2.5 CORRELATION ENERGY'] + psivar['SCF TOTAL ENERGY']
mobj = re.search(
r'^\s+' + r'(?:S-MBPT\(2\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:D-MBPT\(2\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:S-MBPT\(3\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:D-MBPT\(3\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched mp3ro')
dmp2 = Decimal(mobj.group(1)) + Decimal(mobj.group(3))
dmp3 = Decimal(mobj.group(5)) + Decimal(mobj.group(7))
psivar['MP3 CORRELATION ENERGY'] = dmp2 + dmp3
psivar['MP3 TOTAL ENERGY'] = mobj.group(8)
psivar['MP2.5 CORRELATION ENERGY'] = dmp2 + Decimal('0.500000000000') * dmp3
psivar['MP2.5 TOTAL ENERGY'] = psivar['MP2.5 CORRELATION ENERGY'] + psivar['SCF TOTAL ENERGY']
# Process MP4
mobj = re.search(
r'^\s+' + r'(?:D-MBPT\(2\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:D-MBPT\(3\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:D-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:Q-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:S-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched mp4r')
dmp2 = Decimal(mobj.group(1))
dmp3 = Decimal(mobj.group(3))
dmp4sdq = Decimal(mobj.group(5)) + Decimal(mobj.group(7)) + Decimal(mobj.group(9))
psivar['MP2 CORRELATION ENERGY'] = dmp2
psivar['MP2 TOTAL ENERGY'] = mobj.group(2)
psivar['MP3 CORRELATION ENERGY'] = dmp2 + dmp3
psivar['MP3 TOTAL ENERGY'] = mobj.group(4)
psivar['MP2.5 CORRELATION ENERGY'] = dmp2 + Decimal('0.500000000000') * dmp3
psivar['MP2.5 TOTAL ENERGY'] = psivar['MP2.5 CORRELATION ENERGY'] + psivar['SCF TOTAL ENERGY']
psivar['MP4(SDQ) CORRELATION ENERGY'] = dmp2 + dmp3 + dmp4sdq
psivar['MP4(SDQ) TOTAL ENERGY'] = mobj.group(10)
mobj = re.search(
r'^\s+' + r'(?:S-MBPT\(2\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:D-MBPT\(2\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:S-MBPT\(3\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:D-MBPT\(3\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:L-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:NL-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched mp4ro')
dmp2 = Decimal(mobj.group(1)) + Decimal(mobj.group(3))
dmp3 = Decimal(mobj.group(5)) + Decimal(mobj.group(7))
dmp4sdq = Decimal(mobj.group(9)) + Decimal(mobj.group(11))
psivar['MP2 CORRELATION ENERGY'] = dmp2
psivar['MP2 TOTAL ENERGY'] = mobj.group(4)
psivar['MP3 CORRELATION ENERGY'] = dmp2 + dmp3
psivar['MP3 TOTAL ENERGY'] = mobj.group(8)
psivar['MP2.5 CORRELATION ENERGY'] = dmp2 + Decimal('0.500000000000') * dmp3
psivar['MP2.5 TOTAL ENERGY'] = psivar['MP2.5 CORRELATION ENERGY'] + psivar['SCF TOTAL ENERGY']
psivar['MP4(SDQ) CORRELATION ENERGY'] = dmp2 + dmp3 + dmp4sdq
psivar['MP4(SDQ) TOTAL ENERGY'] = mobj.group(12)
mobj = re.search(
r'^\s+' + r'(?:D-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:Q-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:S-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:T-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched mp4tr')
dmp4sdq = Decimal(mobj.group(1)) + Decimal(mobj.group(3)) + Decimal(mobj.group(5))
dmp4t = Decimal(mobj.group(7))
psivar['MP4(SDQ) CORRELATION ENERGY'] = psivar['MP3 CORRELATION ENERGY'] + dmp4sdq
psivar['MP4(SDQ) TOTAL ENERGY'] = mobj.group(6)
psivar['MP4(T) CORRECTION ENERGY'] = dmp4t
psivar['MP4(SDTQ) CORRELATION ENERGY'] = psivar['MP3 CORRELATION ENERGY'] + dmp4sdq + dmp4t
psivar['MP4(SDTQ) TOTAL ENERGY'] = mobj.group(8)
psivar['MP4 CORRELATION ENERGY'] = psivar['MP4(SDTQ) CORRELATION ENERGY']
psivar['MP4 TOTAL ENERGY'] = psivar['MP4(SDTQ) TOTAL ENERGY']
mobj = re.search(
r'^\s+' + r'(?:L-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:NL-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:WT12-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'(?:T-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched mp4tro')
dmp4sdq = Decimal(mobj.group(1)) + Decimal(mobj.group(3))
dmp4t = Decimal(mobj.group(5)) + Decimal(mobj.group(7)) # TODO: WT12 with T, not SDQ?
psivar['MP4(SDQ) CORRELATION ENERGY'] = psivar['MP3 CORRELATION ENERGY'] + dmp4sdq
psivar['MP4(SDQ) TOTAL ENERGY'] = mobj.group(4)
psivar['MP4(T) CORRECTION ENERGY'] = dmp4t
psivar['MP4(SDTQ) CORRELATION ENERGY'] = psivar['MP3 CORRELATION ENERGY'] + dmp4sdq + dmp4t
psivar['MP4(SDTQ) TOTAL ENERGY'] = mobj.group(8)
psivar['MP4 CORRELATION ENERGY'] = psivar['MP4(SDTQ) CORRELATION ENERGY']
psivar['MP4 TOTAL ENERGY'] = psivar['MP4(SDTQ) TOTAL ENERGY']
# Process CC Iterations
mobj = re.search(
r'^\s+' + r'(?P<fullCC>(?P<iterCC>CC(?:\w+))(?:\(T\))?)' + r'\s+(?:energy will be calculated.)\s*' +
r'(?:.*?)' +
r'^\s+' + r'(?:\d+)' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s+DIIS\s*' +
r'^\s*(?:-+)\s*' +
r'^\s*(?:A miracle (?:has come|come) to pass. The CC iterations have converged.)\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched cc with full %s iterating %s' % (mobj.group('fullCC'), mobj.group('iterCC')))
psivar['%s CORRELATION ENERGY' % (mobj.group('iterCC'))] = mobj.group(3)
psivar['%s TOTAL ENERGY' % (mobj.group('iterCC'))] = mobj.group(4)
# Process CC(T)
mobj = re.search(
r'^\s+' + r'(?:E\(SCF\))' + r'\s+=\s+' + NUMBER + r'\s+a\.u\.\s*' +
r'(?:.*?)' +
r'^\s+' + r'(?:E\(CCSD\))' + r'\s+=\s+' + NUMBER + r'\s*' +
r'(?:.*?)' +
r'^\s+' + r'(?:E\(CCSD\(T\)\))' + r'\s+=\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched ccsd(t) vcc')
psivar['SCF TOTAL ENERGY'] = mobj.group(1)
psivar['CCSD TOTAL ENERGY'] = mobj.group(2)
psivar['(T) CORRECTION ENERGY'] = Decimal(mobj.group(3)) - Decimal(mobj.group(2))
psivar['CCSD(T) CORRELATION ENERGY'] = Decimal(mobj.group(3)) - Decimal(mobj.group(1))
psivar['CCSD(T) TOTAL ENERGY'] = mobj.group(3)
mobj = re.search(
r'^\s+' + r'(?:E\(SCF\))' + r'\s+=\s*' + NUMBER + r'\s+a\.u\.\s*' +
r'(?:.*?)' +
r'^\s+' + r'(?:CCSD energy)' + r'\s+' + NUMBER + r'\s*' +
r'(?:.*?)' +
r'^\s+' + r'(?:Total perturbative triples energy:)' + r'\s+' + NUMBER + r'\s*' +
r'^\s*(?:-+)\s*' +
r'^\s+' + r'(?:CCSD\(T\) energy)' + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched ccsd(t) ecc')
psivar['SCF TOTAL ENERGY'] = mobj.group(1)
psivar['CCSD TOTAL ENERGY'] = mobj.group(2)
psivar['(T) CORRECTION ENERGY'] = mobj.group(3)
psivar['CCSD(T) CORRELATION ENERGY'] = Decimal(mobj.group(4)) - Decimal(mobj.group(1))
psivar['CCSD(T) TOTAL ENERGY'] = mobj.group(4)
mobj = re.search(
r'^\s+' + r'(?:CCSD energy)' + r'\s+' + NUMBER + r'\s*' +
r'^\s*(?:-+)\s*' +
r'^\s+' + r'(?:CCSD\(T\) energy)' + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched ccsd(t) lamb')
psivar['CCSD TOTAL ENERGY'] = mobj.group(1)
psivar['(T) CORRECTION ENERGY'] = Decimal(mobj.group(2)) - Decimal(mobj.group(1))
psivar['CCSD(T) CORRELATION ENERGY'] = Decimal(mobj.group(2)) - psivar['SCF TOTAL ENERGY']
psivar['CCSD(T) TOTAL ENERGY'] = mobj.group(2)
# Process SCS-CC
mobj = re.search(
r'^\s+' + r'(?P<fullCC>(?P<iterCC>CC(?:\w+))(?:\(T\))?)' + r'\s+(?:energy will be calculated.)\s*' +
r'(?:.*?)' +
r'^\s*' + r'(?:@CCENRG-I, Correlation energies.)' + r'\s+(?:ECCAA)\s+' + NUMBER + r'\s*' +
r'^\s+(?:ECCBB)\s+' + NUMBER + '\s*' +
r'^\s+(?:ECCAB)\s+' + NUMBER + '\s*' +
r'^\s+(?:Total)\s+' + NUMBER + '\s*',
outtext, re.MULTILINE | re.DOTALL)
if mobj: # PRINT=2 to get SCS-CC components
print('matched scscc')
psivar['%s SAME-SPIN CORRELATION ENERGY' % (mobj.group('iterCC'))] = Decimal(mobj.group(3)) + Decimal(mobj.group(4))
psivar['%s OPPOSITE-SPIN CORRELATION ENERGY' % (mobj.group('iterCC'))] = mobj.group(5)
psivar['%s CORRELATION ENERGY' % (mobj.group('iterCC'))] = mobj.group(6)
mobj = re.search(
r'^\s+' + r'(?P<fullCC>(?P<iterCC>CC(?:\w+))(?:\(T\))?)' + r'\s+(?:energy will be calculated.)\s*' +
r'(?:.*?)' +
r'^\s+' + r'Amplitude equations converged in' + r'\s*\d+\s*' + r'iterations.\s*' +
r'^\s+' + r'The AA contribution to the correlation energy is:\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'The BB contribution to the correlation energy is:\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'The AB contribution to the correlation energy is:\s+' + NUMBER + r'\s+a.u.\s*' +
r'^\s+' + r'The total correlation energy is\s+' + NUMBER + r'\s+a.u.\s*' +
r'(?:.*?)' +
#r'^\s+' + r'The CC iterations have converged.' + r'\s*$',
r'^\s+' + r'(?:A miracle come to pass. )?' + r'The CC iterations have converged.' + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj: # PRINT=2 to get SCS components
print('matched scscc2')
psivar['%s SAME-SPIN CORRELATION ENERGY' % (mobj.group('iterCC'))] = Decimal(mobj.group(3)) + Decimal(mobj.group(4))
psivar['%s OPPOSITE-SPIN CORRELATION ENERGY' % (mobj.group('iterCC'))] = mobj.group(5)
psivar['%s CORRELATION ENERGY' % (mobj.group('iterCC'))] = mobj.group(6)
# Process gradient
mobj = re.search(
r'\s+' + r'Molecular gradient' + r'\s*' +
r'\s+' + r'------------------' + r'\s*' +
r'\s+' + r'\n' +
r'(?:(?:\s+[A-Z]+\s*#\d+\s+[xyz]\s+[-+]?\d+\.\d+\s*\n)+)' + # optional, it seems
r'\n\n' + # optional, it seems
r'((?:\s+[A-Z]+\s*#\d+\s+\d?\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s*\n)+)' +
r'\n\n' +
r'\s+' + 'Molecular gradient norm',
outtext, re.MULTILINE)
if mobj:
print('matched molgrad')
atoms = []
psivar_grad = []
for line in mobj.group(1).splitlines():
lline = line.split()
atoms.append(lline[0])
#psivar_gradient.append([Decimal(lline[-3]), Decimal(lline[-2]), Decimal(lline[-1])])
psivar_grad.append([float(lline[-3]), float(lline[-2]), float(lline[-1])])
# Process geometry
mobj = re.search(
# r'\s+(?:-+)\s*' +
# r'^\s+' + r'Z-matrix Atomic Coordinates (in bohr)' + r'\s*' +
r'^\s+' + r'Symbol Number X Y Z' + r'\s*' +
r'^\s+(?:-+)\s*' +
r'((?:\s+[A-Z]+\s+[0-9]+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s*\n)+)' +
r'^\s+(?:-+)\s*',
outtext, re.MULTILINE)
if mobj:
print('matched geom')
molxyz = '%d bohr\n\n' % len(mobj.group(1).splitlines())
for line in mobj.group(1).splitlines():
lline = line.split()
molxyz += '%s %16s %16s %16s\n' % (lline[0], lline[-3], lline[-2], lline[-1])
# Rather a dinky Molecule as no ghost, charge, or multiplicity
psivar_coord = Molecule.init_with_xyz(molxyz, no_com=True, no_reorient=True, contentsNotFilename=True)
# Process atom geometry
mobj = re.search(
r'^\s+' + r'@GETXYZ-I, 1 atoms read from ZMAT.' + r'\s*' +
r'^\s+' + r'[0-9]+\s+([A-Z]+)\s+[0-9]+\s+' + NUMBER + r'\s*',
outtext, re.MULTILINE)
if mobj:
print('matched atom')
# Dinky Molecule
molxyz = '1 bohr\n\n%s 0.0 0.0 0.0\n' % (mobj.group(1))
psivar_coord = Molecule.init_with_xyz(molxyz, no_com=True, no_reorient=True, contentsNotFilename=True)
# Process error codes
mobj = re.search(
r'^\s*' + r'--executable ' + r'(\w+)' + r' finished with status' + r'\s+' + r'([1-9][0-9]*)',
outtext, re.MULTILINE)
if mobj:
print('matched error')
psivar['CFOUR ERROR CODE'] = mobj.group(2)
# Process CURRENT energies (TODO: needs better way)
if 'SCF TOTAL ENERGY' in psivar:
psivar['CURRENT REFERENCE ENERGY'] = psivar['SCF TOTAL ENERGY']
psivar['CURRENT ENERGY'] = psivar['SCF TOTAL ENERGY']
if 'MP2 TOTAL ENERGY' in psivar and 'MP2 CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['MP2 CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['MP2 TOTAL ENERGY']
if 'MP3 TOTAL ENERGY' in psivar and 'MP3 CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['MP3 CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['MP3 TOTAL ENERGY']
if 'MP4 TOTAL ENERGY' in psivar and 'MP4 CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['MP4 CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['MP4 TOTAL ENERGY']
# if ('%s TOTAL ENERGY' % (mobj.group('fullCC')) in psivar) and \
# ('%s CORRELATION ENERGY' % (mobj.group('fullCC')) in psivar):
# psivar['CURRENT CORRELATION ENERGY'] = psivar['%s CORRELATION ENERGY' % (mobj.group('fullCC')]
# psivar['CURRENT ENERGY'] = psivar['%s TOTAL ENERGY' % (mobj.group('fullCC')]
if 'CC2 TOTAL ENERGY' in psivar and 'CC2 CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CC2 CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CC2 TOTAL ENERGY']
if 'CCSD TOTAL ENERGY' in psivar and 'CCSD CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSD TOTAL ENERGY']
if 'CCSD(T) TOTAL ENERGY' in psivar and 'CCSD(T) CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD(T) CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSD(T) TOTAL ENERGY']
if 'CC3 TOTAL ENERGY' in psivar and 'CC3 CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CC3 CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CC3 TOTAL ENERGY']
if 'CCSDT TOTAL ENERGY' in psivar and 'CCSDT CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSDT CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSDT TOTAL ENERGY']
return psivar, psivar_coord, psivar_grad
def harvest(p4Mol, c4out, **largs):
"""Parses all the pieces of output from Cfour: the stdout in
*c4out* and the contents of various scratch files like GRD stored
in their namesake keys in *largs*. Since all Cfour output uses
its own orientation and atom ordering for the given molecule,
a qcdb.Molecule *p4Mol*, if supplied, is used to transform the
Cfour output back into consistency with *p4Mol*.
"""
# Collect results from output file and subsidiary files
outPsivar, outMol, outGrad = harvest_output(c4out)
if 'GRD' in largs:
grdMol, grdGrad = harvest_GRD(largs['GRD'])
else:
grdMol, grdGrad = None, None
if 'FCMFINAL' in largs:
fcmHess = harvest_FCM(largs['FCMFINAL'])
else:
fcmHess = None
if 'DIPOL' in largs:
dipolDip = harvest_DIPOL(largs['DIPOL'])
else:
dipolDip = None
# Reconcile the coordinate information: several cases
# Case p4Mol GRD Check consistency Apply orientation? ReturnMol (1-19-2014)
# sp with mol thru cfour {} None None outMol N.C. outMol
# opt with mol thru cfour {} None grdMol outMol && grdMol N.C. grdMol
# sp with mol thru molecule {} p4Mol None p4Mol && outMol p4Mol <-- outMol p4Mol (same as input arg)
# opt with mol thru molecule {} p4Mol grdMol p4Mol && outMol && grdMol p4Mol <-- grdMol p4Mol (same as input arg)
if outMol:
if grdMol:
if abs(outMol.nuclear_repulsion_energy() - grdMol.nuclear_repulsion_energy()) > 1.0e-3:
raise ValidationError("""Cfour outfile (NRE: %f) inconsistent with Cfour GRD (NRE: %f).""" % \
(outMol.nuclear_repulsion_energy(), grdMol.nuclear_repulsion_energy()))
if p4Mol:
if abs(outMol.nuclear_repulsion_energy() - p4Mol.nuclear_repulsion_energy()) > 1.0e-3:
raise ValidationError("""Cfour outfile (NRE: %f) inconsistent with Psi4 input (NRE: %f).""" % \
(outMol.nuclear_repulsion_energy(), p4Mol.nuclear_repulsion_energy()))
else:
raise ValidationError("""No coordinate information extracted from Cfour output.""")
# print ' <<< [1] P4-MOL >>>'
# if p4Mol:
# p4Mol.print_out_in_bohr()
# print ' <<< [2] C4-OUT-MOL >>>'
# if outMol:
# outMol.print_out_in_bohr()
# print ' <<< [3] C4-GRD-MOL >>>'
# if grdMol:
# grdMol.print_out_in_bohr()
# Set up array reorientation object
if p4Mol and grdMol:
p4c4 = OrientMols(p4Mol, grdMol)
oriCoord = p4c4.transform_coordinates2(grdMol)
oriGrad = p4c4.transform_gradient(grdGrad)
oriDip = None if dipolDip is None else p4c4.transform_vector(dipolDip)
elif p4Mol and outMol:
p4c4 = OrientMols(p4Mol, outMol)
oriCoord = p4c4.transform_coordinates2(outMol)
oriGrad = None
oriDip = None if dipolDip is None else p4c4.transform_vector(dipolDip)
elif outMol:
oriCoord = None
oriGrad = None
oriDip = None if dipolDip is None else dipolDip
# print p4c4
# print ' <<< [4] C4-ORI-MOL >>>'
# if oriCoord is not None:
# for item in oriCoord:
# print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
#
# print ' <<< [1] C4-GRD-GRAD >>>'
# if grdGrad is not None:
# for item in grdGrad:
# print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
# print ' <<< [2] C4-ORI-GRAD >>>'
# if oriGrad is not None:
# for item in oriGrad:
# print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
retMol = None if p4Mol else grdMol
if oriDip:
outPsivar['CURRENT DIPOLE X'] = str(oriDip[0] * psi_dipmom_au2debye)
outPsivar['CURRENT DIPOLE Y'] = str(oriDip[1] * psi_dipmom_au2debye)
outPsivar['CURRENT DIPOLE Z'] = str(oriDip[2] * psi_dipmom_au2debye)
if oriGrad:
retGrad = oriGrad
elif grdGrad:
retGrad = grdGrad
else:
retGrad = None
return outPsivar, retGrad, retMol
def harvest_GRD(grd):
"""Parses the contents *grd* of the Cfour GRD file into the gradient
array and coordinate information. The coordinate info is converted
into a rather dinky Molecule (no charge, multiplicity, or fragment),
but this is these coordinates that govern the reading of molecule
orientation by Cfour. Return qcdb.Molecule and gradient array.
"""
grd = grd.splitlines()
Nat = int(grd[0].split()[0])
molxyz = '%d bohr\n\n' % (Nat)
grad = []
for at in range(Nat):
mline = grd[at + 1].split()
el = 'GH' if int(float(mline[0])) == 0 else z2el[int(float(mline[0]))]
molxyz += '%s %16s %16s %16s\n' % (el, mline[-3], mline[-2], mline[-1])
lline = grd[at + 1 + Nat].split()
grad.append([float(lline[-3]), float(lline[-2]), float(lline[-1])])
mol = Molecule.init_with_xyz(molxyz, no_com=True, no_reorient=True, contentsNotFilename=True)
return mol, grad
def harvest_zmat(zmat):
"""Parses the contents of the Cfour ZMAT file into array and
coordinate information. The coordinate info is converted into a
rather dinky Molecule (no fragment, but does read charge, mult,
unit). Return qcdb.Molecule. Written for findif zmat* where
geometry always Cartesian and Bohr.
"""
zmat = zmat.splitlines()[1:] # skip comment line
Nat = 0
readCoord = True
isBohr = ''
charge = 0
mult = 1
molxyz = ''
cgeom = []
for line in zmat:
if line.strip() == '':
readCoord = False
elif readCoord:
lline = line.split()
molxyz += line + '\n'
Nat += 1
else:
if line.find('CHARGE') > -1:
idx = line.find('CHARGE')
charge = line[idx + 7:]
idxc = charge.find(',')
if idxc > -1:
charge = charge[:idxc]
charge = int(charge)
if line.find('MULTIPLICITY') > -1:
idx = line.find('MULTIPLICITY')
mult = line[idx + 13:]
idxc = mult.find(',')
if idxc > -1:
mult = mult[:idxc]
mult = int(mult)
if line.find('UNITS=BOHR') > -1:
isBohr = ' bohr'
molxyz = '%d%s\n%d %d\n' % (Nat, isBohr, charge, mult) + molxyz
mol = Molecule.init_with_xyz(molxyz, no_com=True, no_reorient=True, contentsNotFilename=True)
return mol
def harvest_FCM(fcm):
"""Parses the contents *fcm* of the Cfour FCMFINAL file into a hessian array.
"""
fcm = fcm.splitlines()
Nat = int(fcm[0].split()[0])
Ndof = int(fcm[0].split()[1])
empty = True
hess = []
for df in range(Ndof):
for at in range(Nat):
            lline = fcm[df * Nat + at + 1].split()
if empty:
if (abs(float(lline[0])) > 1.0e-8) or \
(abs(float(lline[1])) > 1.0e-8) or \
(abs(float(lline[2])) > 1.0e-8):
empty = False
            hess.append([float(lline[0]), float(lline[1]), float(lline[2])])
return None if empty else hess
def harvest_DIPOL(dipol):
"""Parses the contents *dipol* of the Cfour DIPOL file into a dipol vector.
"""
dipol = dipol.splitlines()
lline = dipol[0].split()
dip = [float(lline[0]), float(lline[1]), float(lline[2])]
#return None if empty else dip
return dip
def muster_memory(mem):
"""Transform input *mem* in MB into psi4-type options for cfour.
"""
text = ''
# prepare memory keywords to be set as c-side keywords
options = defaultdict(lambda: defaultdict(dict))
options['CFOUR']['CFOUR_MEMORY_SIZE']['value'] = int(mem)
options['CFOUR']['CFOUR_MEM_UNIT']['value'] = 'MB'
for item in options['CFOUR']:
options['CFOUR'][item]['clobber'] = True
return text, options
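# --- Editor's illustrative sketch (not part of the original module) ---
# Rough shape of what muster_memory() hands back; 2000 MB is an arbitrary value.
def _example_muster_memory():
    text, options = muster_memory(2000)
    # options['CFOUR']['CFOUR_MEMORY_SIZE'] -> {'value': 2000, 'clobber': True}
    # options['CFOUR']['CFOUR_MEM_UNIT']    -> {'value': 'MB', 'clobber': True}
    return options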
# Ways of modifying a computation
# global: set global c-side option
# local: set local c-side option
# kwarg: set kwarg
# i-local: set global=local c-side option to an interface module
# ro-def: code uses default entirely specified by read_options
# module-def: code uses default that is complex mixture of read_options settings
# i-def: interfaced code uses defaults not entirely expressed in read_options
# driver-def: driver code sets complex defaults
#
# Pure psi4 operation
# kwarg ~= local > global > driver-def > module-def > ro-def
#
# Interfaced psi4 operation
# kwarg ~= i-local > local > global > driver-def > i-def
# P4 infrastructure replacing interfaced infrastructure (mol, basis, mem): where the overlap in how
#   things are specified is unavoidable (e.g., mult in mol{} vs. a keyword), it is treated as a clobber
#   and complains on conflict; where P4 infrastructure is merely an aliased/convenient leak into the
#   interfaced program (psi), it is strictly no clobber and no complaint.
def muster_psi4options(opt):
"""Translate psi4 keywords *opt* that have been explicitly set into
their Cfour counterparts. Since explicitly set Cfour module keyword
values will always be used preferentially to these inferred from
psi4, the 'clobber' property is set to False.
"""
text = ''
options = defaultdict(lambda: defaultdict(dict))
if 'GLOBALS' in opt:
if 'PUREAM' in opt['GLOBALS']:
options['CFOUR']['CFOUR_SPHERICAL']['value'] = \
                opt['GLOBALS']['PUREAM']['value']
if 'SCF' in opt:
if 'REFERENCE' in opt['SCF']:
options['CFOUR']['CFOUR_REFERENCE']['value'] = \
{'RHF': 'RHF',
'UHF': 'UHF',
'ROHF': 'ROHF'}[opt['SCF']['REFERENCE']['value']]
if 'D_CONVERGENCE' in opt['SCF']:
options['CFOUR']['CFOUR_SCF_CONV']['value'] = \
conv_float2negexp(opt['SCF']['D_CONVERGENCE']['value'])
if 'MAXITER' in opt['SCF']:
options['CFOUR']['CFOUR_SCF_MAXCYC']['value'] = \
opt['SCF']['MAXITER']['value']
if 'DAMPING_PERCENTAGE' in opt['SCF']:
options['CFOUR']['CFOUR_SCF_DAMPING']['value'] = \
int(10 * opt['SCF']['DAMPING_PERCENTAGE']['value'])
for item in options['CFOUR']:
options['CFOUR'][item]['clobber'] = False
return text, options
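# --- Editor's illustrative sketch (not part of the original module) ---
# Example translation of an explicitly set psi4 SCF block; the input dict shape
# mirrors what the function reads above and the values are made up. The SCF_CONV
# entry should come out as the negative exponent of the convergence (expected 6).
def _example_muster_psi4options():
    opt = {'SCF': {'REFERENCE': {'value': 'UHF'},
                   'D_CONVERGENCE': {'value': 1.0e-6}}}
    text, options = muster_psi4options(opt)
    # options['CFOUR']['CFOUR_REFERENCE'] -> {'value': 'UHF', 'clobber': False}
    return options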
# Philosophy break:
# Specification options
# Massaging options
# * No program's defaults should be tampered with w/o provocation
# want all defaults applied to all programs, so p4 scf_conv is 5 and c4 scf_conv is 5
# want separate regimes, so conv 6 covers all the p4 parts and cfour_conv = 8 covers the c4 parts
# want mixture, so basis gets applied to c4 but others don't
# first case, when options specified explicitly
# [scf][d_convergence] [cfour][cfour_scf_conv] what happens?
# 8 from opt() 7 by default
# 6 from set {...} 7 by default 6 (guideline that psi4 format converts when clear)
# 8 from opt() 5 from set {...} 5 (local trumps)
# 6 from set {...} 5 from set {...} 5 (local trumps)
#
# energy(name) [cfour][cfour_calc_level]
# c4-scf SCF by default
# c4-scf CCSD from set {...}
def muster_modelchem(name, dertype):
"""Transform calculation method *name* and derivative level *dertype*
into options for cfour. While deliberately requested pieces,
generally |cfour__cfour_deriv_level| and |cfour__cfour_calc_level|,
are set to complain if contradicted ('clobber' set to True), other
'recommended' settings, like |cfour__cfour_cc_program|, can be
countermanded by keywords in input file ('clobber' set to False).
Occasionally, want these pieces to actually overcome keywords in
input file ('superclobber' set to True).
"""
text = ''
lowername = name.lower()
options = defaultdict(lambda: defaultdict(dict))
if dertype == 0:
if lowername == 'cfour':
pass # permit clean operation of sandwich mode
else:
options['CFOUR']['CFOUR_DERIV_LEVEL']['value'] = 'ZERO'
elif dertype == 1:
options['CFOUR']['CFOUR_DERIV_LEVEL']['value'] = 'FIRST'
elif dertype == 2:
options['CFOUR']['CFOUR_DERIV_LEVEL']['value'] = 'SECOND'
else:
raise ValidationError("""Requested Cfour dertype %d is not available.""" % (dertype))
if lowername == 'cfour':
pass
elif lowername == 'c4-scf':
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'SCF'
elif lowername == 'c4-mp2':
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'MP2'
elif lowername == 'c4-mp3':
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'MP3'
elif lowername == 'c4-mp4(sdq)':
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'SDQ-MP4'
elif lowername == 'c4-mp4':
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'MP4'
elif lowername == 'c4-cc2':
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'CC2'
elif lowername == 'c4-ccsd':
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'CCSD'
options['CFOUR']['CFOUR_CC_PROGRAM']['value'] = 'ECC'
elif lowername == 'c4-cc3':
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'CC3'
elif lowername == 'c4-ccsd(t)':
# Can't use (T) b/c bug in xsymcor lops it off
#options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'CCSD(T)'
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'CCSD[T]'
options['CFOUR']['CFOUR_CC_PROGRAM']['value'] = 'ECC'
elif lowername == 'c4-ccsdt':
options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'CCSDT'
options['CFOUR']['CFOUR_CC_PROGRAM']['value'] = 'ECC'
else:
raise ValidationError("""Requested Cfour computational methods %d is not available.""" % (lowername))
# Set clobbering
if 'CFOUR_DERIV_LEVEL' in options['CFOUR']:
options['CFOUR']['CFOUR_DERIV_LEVEL']['clobber'] = True
options['CFOUR']['CFOUR_DERIV_LEVEL']['superclobber'] = True
if 'CFOUR_CALC_LEVEL' in options['CFOUR']:
options['CFOUR']['CFOUR_CALC_LEVEL']['clobber'] = True
options['CFOUR']['CFOUR_CALC_LEVEL']['superclobber'] = True
if 'CFOUR_CC_PROGRAM' in options['CFOUR']:
options['CFOUR']['CFOUR_CC_PROGRAM']['clobber'] = False
return text, options
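# --- Editor's illustrative sketch (not part of the original module) ---
# Options produced for a CCSD(T) gradient request, following the branches above.
def _example_muster_modelchem():
    text, options = muster_modelchem('c4-ccsd(t)', 1)
    # options['CFOUR']['CFOUR_DERIV_LEVEL'] -> {'value': 'FIRST', 'clobber': True, 'superclobber': True}
    # options['CFOUR']['CFOUR_CALC_LEVEL']  -> {'value': 'CCSD[T]', 'clobber': True, 'superclobber': True}
    # options['CFOUR']['CFOUR_CC_PROGRAM']  -> {'value': 'ECC', 'clobber': False}
    return options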
def cfour_list():
"""Return an array of Cfour methods with energies. Appended
to procedures['energy'].
"""
val = []
val.append('cfour')
val.append('c4-scf')
val.append('c4-mp2')
val.append('c4-mp3')
val.append('c4-mp4(sdq)')
val.append('c4-mp4')
val.append('c4-cc2')
val.append('c4-ccsd')
val.append('c4-cc3')
val.append('c4-ccsd(t)')
val.append('c4-ccsdt')
return val
def cfour_gradient_list():
"""Return an array of Cfour methods with analytical gradients.
Appended to procedures['gradient'].
"""
val = []
val.append('cfour')
val.append('c4-scf')
val.append('c4-mp2')
val.append('c4-mp3')
val.append('c4-mp4(sdq)')
val.append('c4-mp4')
val.append('c4-cc2')
val.append('c4-ccsd')
val.append('c4-cc3')
val.append('c4-ccsd(t)')
val.append('c4-ccsdt')
return val
def cfour_psivar_list():
"""Return a dict with keys of most Cfour methods and values of dicts
with the PSI Variables returned by those methods. Used by cbs()
wrapper to avoid unnecessary computations in compound methods.
Result is appended to ``VARH``.
"""
VARH = {}
VARH['c4-scf'] = {
'c4-scftot': 'SCF TOTAL ENERGY'}
VARH['c4-mp2'] = {
'c4-scftot': 'SCF TOTAL ENERGY',
'c4-mp2corl': 'MP2 CORRELATION ENERGY'}
VARH['c4-mp3'] = {
'c4-scftot': 'SCF TOTAL ENERGY',
'c4-mp2corl': 'MP2 CORRELATION ENERGY',
'c4-mp2.5corl': 'MP2.5 CORRELATION ENERGY',
'c4-mp3corl': 'MP3 CORRELATION ENERGY'}
VARH['c4-mp4(sdq)'] = {
'c4-scftot': 'SCF TOTAL ENERGY',
'c4-mp2corl': 'MP2 CORRELATION ENERGY',
'c4-mp2.5corl': 'MP2.5 CORRELATION ENERGY',
'c4-mp3corl': 'MP3 CORRELATION ENERGY',
'c4-mp4(sdq)corl': 'MP4(SDQ) CORRELATION ENERGY'}
VARH['c4-mp4'] = {
'c4-scftot': 'SCF TOTAL ENERGY',
'c4-mp2corl': 'MP2 CORRELATION ENERGY',
'c4-mp2.5corl': 'MP2.5 CORRELATION ENERGY',
'c4-mp3corl': 'MP3 CORRELATION ENERGY',
'c4-mp4(sdq)corl': 'MP4(SDQ) CORRELATION ENERGY',
'c4-mp4corl': 'MP4(SDTQ) CORRELATION ENERGY'}
VARH['c4-cc2'] = {
'c4-scftot': 'SCF TOTAL ENERGY',
'c4-mp2corl': 'MP2 CORRELATION ENERGY',
'c4-cc2corl': 'CC2 CORRELATION ENERGY'}
VARH['c4-ccsd'] = {
'c4-scftot': 'SCF TOTAL ENERGY',
'c4-mp2corl': 'MP2 CORRELATION ENERGY',
'c4-ccsdcorl': 'CCSD CORRELATION ENERGY'}
VARH['c4-cc3'] = {
'c4-scftot': 'SCF TOTAL ENERGY',
'c4-mp2corl': 'MP2 CORRELATION ENERGY',
'c4-cc3corl': 'CC3 CORRELATION ENERGY'}
VARH['c4-ccsd(t)'] = {
'c4-scftot': 'SCF TOTAL ENERGY',
'c4-mp2corl': 'MP2 CORRELATION ENERGY',
'c4-ccsdcorl': 'CCSD CORRELATION ENERGY',
'c4-ccsd(t)corl': 'CCSD(T) CORRELATION ENERGY'}
VARH['c4-ccsdt'] = {
'c4-scftot': 'SCF TOTAL ENERGY',
'c4-mp2corl': 'MP2 CORRELATION ENERGY',
'c4-ccsdcorl': 'CCSD CORRELATION ENERGY',
'c4-ccsdtcorl': 'CCSDT CORRELATION ENERGY'}
return VARH
#def backtransform(chgeMol, permMol, chgeGrad=None, chgeDip=None):
#def format_fjobarc(fje, fjelem, fjcoord, fjgrd, map, fjdip):
def format_fjobarc(energy, map, elem, coordinates, gradient, dipole):
"""Takes the key results from a gradient computation (*energy*,
element Z list *elem*, *coordinates*, *gradient*,
*dipole*, and atom ordering *map*) and writes a string *fja*
that exactly mimics the contents of a Cfour FJOBARC file.
"""
fja = 'TOTENERG\n'
fja += '%15d%15d\n' % (struct.unpack("ii", struct.pack("d", energy)))
fja += 'COORD\n'
Nat = len(coordinates)
flatcoord = []
for at in range(Nat):
for xyz in range(3):
flatcoord.append(coordinates[map[at]][xyz])
for idx in range(len(flatcoord)):
if abs(flatcoord[idx]) < 1.0E-14: # TODO
flatcoord[idx] = 0.0
fja += '%15d%15d' % (struct.unpack("ii", struct.pack("d", flatcoord[idx])))
if idx % 2 == 1:
fja += '\n'
if len(flatcoord) % 2 == 1:
fja += '\n'
fja += 'MAP2ZMAT\n'
for idx in range(Nat):
fja += '%15d%15d' % (struct.unpack("ii", struct.pack("l", map[idx] + 1)))
if idx % 2 == 1:
fja += '\n'
if Nat % 2 == 1:
fja += '\n'
fja += 'GRD FILE\n'
fja += '%5d%20.10f\n' % (Nat, 0.0)
for at in range(Nat):
fja += '%20.10f%20.10f%20.10f%20.10f\n' % (elem[at], coordinates[at][0], coordinates[at][1], coordinates[at][2])
for at in range(Nat):
fja += '%20.10f%20.10f%20.10f%20.10f\n' % (elem[at], gradient[at][0], gradient[at][1], gradient[at][2])
fja += 'DIPOL FILE\n'
fja += '%20.10f%20.10f%20.10f\n' % (dipole[0], dipole[1], dipole[2])
return fja
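# --- Editor's illustrative sketch (not part of the original module) ---
# The FJOBARC writer above encodes each double as two 4-byte ints; this shows
# the round trip for an arbitrary example energy value.
def _example_pack_energy(energy=-76.0267607):
    lo, hi = struct.unpack("ii", struct.pack("d", energy))
    assert struct.unpack("d", struct.pack("ii", lo, hi))[0] == energy
    return lo, hi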
def backtransform(chgeMol, permMol, chgeGrad=None, chgeDip=None):
"""Here, *chgeMol* and *chgeGrd* need to be turned into the native Cfour
orientation embodied by *permMol*. Currently for vpt2.
"""
# Set up array reorientation object
p4c4 = OrientMols(permMol, chgeMol) # opposite than usual
oriCoord = p4c4.transform_coordinates2(chgeMol)
p4Elem = []
for at in range(chgeMol.natom()):
p4Elem.append(chgeMol.Z(at))
oriElem = p4c4.transform_elementlist(p4Elem)
oriElemMap = p4c4.Catommap
oriGrad = None if chgeGrad is None else p4c4.transform_gradient(chgeGrad)
oriDip = None if chgeDip is None else p4c4.transform_vector(chgeDip)
if chgeGrad and chgeDip:
return oriElemMap, oriElem, oriCoord, oriGrad, oriDip
else:
return oriElemMap, oriElem, oriCoord
#def backtransform_grad(p4Mol, c4Mol, p4Grd, p4Dip):
# """Here, p4Mol and p4Grd need to be turned into the native Cfour
# orientation embodied by c4Mol. Currently for vpt2.
#
# """
# # Set up array reorientation object
# p4c4 = OrientMols(c4Mol, p4Mol) # opposite than usual
# oriCoord = p4c4.transform_coordinates2(p4Mol)
# oriGrad = p4c4.transform_gradient(p4Grd)
# p4Elem = []
# for at in range(p4Mol.natom()):
# p4Elem.append(p4Mol.Z(at))
# oriElem = p4c4.transform_elementlist(p4Elem)
# oriElemMap = p4c4.Catommap
# oriDip = p4c4.transform_vector(p4Dip)
#
# #print p4c4
# #print ' <<< Input C4 Mol >>>'
# #c4Mol.print_out()
# #print ' <<< Input P4 Mol >>>'
# #p4Mol.print_out()
# #print ' <<< Input P4 Grad >>>'
# #if p4Grd is not None:
# # for item in p4Grd:
# # print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
# #print ' <<< Rotated P4 Coord >>>'
# #if oriCoord is not None:
# # for item in oriCoord:
# # print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
# #print ' <<< Rotated P4 Elem >>>'
# #if oriElem is not None:
# # for item in oriElem :
# # print(' %16.8f' % (item))
# #print ' <<< Rotated P4 Dip >>>'
# #if oriDip is not None:
# # print(' %16.8f %16.8f %16.8f' % (oriDip[0], oriDip[1], oriDip[2]))
# #print ' <<< Rotated P4 Grad >>>'
# #if oriGrad is not None:
# # for item in oriGrad:
# # print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
#
# return oriElemMap, oriElem, oriCoord, oriGrad, oriDip
# #return oriElem, oriCoord, oriGrad, oriElemMap, oriDip
def jajo2mol(jajodic):
"""Returns a Molecule from entries in dictionary *jajodic* extracted
from JAINDX and JOBARC.
"""
map = jajodic['MAP2ZMAT']
elem = jajodic['ATOMCHRG']
coord = jajodic['COORD ']
Nat = len(elem)
molxyz = '%d bohr\n\n' % (Nat)
# TODO chgmult, though not really necessary for reorientation
for at in range(Nat):
posn = map[at] - 1
el = 'GH' if elem[posn] == 0 else z2el[elem[posn]]
posn *= 3
molxyz += '%s %21.15f %21.15f %21.15f\n' % (el, coord[posn], coord[posn + 1], coord[posn + 2])
mol = Molecule.init_with_xyz(molxyz, no_com=True, no_reorient=True, contentsNotFilename=True)
return mol
|
kratman/psi4public
|
psi4/driver/qcdb/cfour.py
|
Python
|
gpl-2.0
| 46,066
|
[
"CFOUR",
"Psi4"
] |
27b671c5127e22fd6f537d7425141955eb4e055e2b71587646a9b7c5b68b0d68
|
#!/usr/bin/env python
import logging
import sys
import argparse
import vcf
import pysam
from metasv.breakdancer_reader import BreakDancerReader
from metasv.pindel_reader import PindelReader
from metasv.cnvnator_reader import CNVnatorReader
from metasv.breakseq_reader import BreakSeqReader
from metasv.vcf_utils import get_template
from metasv.fasta_utils import get_contigs
logging.basicConfig()
logger = logging.getLogger(__name__)
tool_to_reader = {"BreakDancer": BreakDancerReader, "Pindel": PindelReader, "CNVnator": CNVnatorReader,
"BreakSeq": BreakSeqReader}
def convert_svtool_to_vcf(file_name, sample, out_vcf, toolname, reference, sort=False, index=False):
vcf_template_reader = get_template()
vcf_template_reader.samples = [sample]
vcf_fd = open(out_vcf, "w") if out_vcf is not None else sys.stdout
vcf_writer = vcf.Writer(vcf_fd, vcf_template_reader)
reference_handle = pysam.Fastafile(reference) if reference else None
reference_contigs = get_contigs(reference)
if sort:
if not reference_contigs:
logger.warn("Chromosomes will be sorted in lexicographic order since reference is missing")
else:
logger.info("Chromosomes will be sorted by the reference order")
vcf_records = []
for tool_record in tool_to_reader[toolname](file_name, reference_handle=reference_handle):
vcf_record = tool_record.to_vcf_record(sample)
if vcf_record is None:
continue
if sort:
vcf_records.append(vcf_record)
else:
vcf_writer.write_record(vcf_record)
if sort:
if reference_contigs:
            contigs_order_dict = {contig.name: index for (index, contig) in enumerate(reference_contigs)}
            # Sort by reference contig order, then position (key-based sorting
            # works on both Python 2 and 3, unlike the removed cmp argument).
            vcf_records.sort(key=lambda rec: (contigs_order_dict[rec.CHROM], rec.POS))
        else:
            vcf_records.sort(key=lambda rec: (rec.CHROM, rec.POS))
for vcf_record in vcf_records:
vcf_writer.write_record(vcf_record)
vcf_writer.close()
if out_vcf and index:
pysam.tabix_index(out_vcf, force=True, preset='vcf')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Convert SV tool output file to VCF",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--input", help="SV tool output file", required=False)
parser.add_argument("--output", help="Output VCF to create", required=False)
parser.add_argument("--tool", help="Tool name", required=False, default="BreakDancer",
choices=sorted(tool_to_reader.keys()))
parser.add_argument("--sample", help="Sample name", required=True)
parser.add_argument("--reference", help="Reference FASTA")
parser.add_argument("--sort", action="store_true", help="Sort the VCF records before writing")
parser.add_argument("--index", action="store_true", help="Tabix compress and index the output VCF")
args = parser.parse_args()
convert_svtool_to_vcf(args.input, args.sample, args.output, args.tool, args.reference, sort=args.sort,
index=args.index)
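# Hypothetical example invocations (file names are placeholders, not paths from
# the original repository):
#
#   python svtool_to_vcf.py --input breakdancer.out --tool BreakDancer \
#       --sample NA12878 --reference ref.fa --output breakdancer.vcf --sort --index
#
# or, programmatically:
#
#   convert_svtool_to_vcf("pindel_D", "NA12878", "pindel.vcf", "Pindel",
#                         "ref.fa", sort=True, index=True)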
|
chapmanb/metasv
|
scripts/svtool_to_vcf.py
|
Python
|
bsd-2-clause
| 3,249
|
[
"pysam"
] |
c22c56e6e7a17746946d80e0fce0da7e2c1642967ab244c267b7bffd1c9c4272
|
# coding=utf-8
# Copyright 2022 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers for the Transformer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
import math
import gin
import mesh_tensorflow as mtf
from mesh_tensorflow import layers
from mesh_tensorflow.transformer import attention
from mesh_tensorflow.transformer import transformer
import tensorflow.compat.v1 as tf
@gin.configurable
class DenseReluDense(transformer.TransformerLayer):
"""Two dense layers with ReLU or other activation on hidden layer."""
def __init__(self, hidden_size=4096, dropout_rate=0.0, activation="relu",
use_bias=False):
"""Create a DenseReluDense.
Args:
hidden_size: an integer - size of the hidden layer
dropout_rate: a floating-point number
activation: an activation function or a list of activation functions.
see documentation for mtf.layers.dense_product()
use_bias: a boolean, whether to use bias in the dense layers.
"""
self.hidden_size = hidden_size
self.dropout_rate = dropout_rate
self.activation = activation
self.use_bias = use_bias
def call(self, context, x, losses=None):
"""Call the layer."""
io_channels = x.shape.dims[-1]
hidden_channels = mtf.Dimension("d_ff", self.hidden_size)
h = mtf.layers.dense_product(x,
reduced_dims=x.shape.dims[-1:],
new_dims=hidden_channels,
activation_functions=self.activation,
use_bias=self.use_bias,
variable_dtype=context.variable_dtype,
name="wi",
expert_dims=context.model.ensemble_dims)
if context.train and self.dropout_rate != 0.0:
h = mtf.dropout(h, context.train, keep_prob=1.0 - self.dropout_rate,
noise_shape=h.shape - context.length_dim)
return mtf.layers.dense(h, io_channels,
use_bias=self.use_bias,
activation=None,
variable_dtype=context.variable_dtype,
reduced_dims=h.shape.dims[-1:],
name="wo",
expert_dims=context.model.ensemble_dims)
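# A plain-NumPy sketch (an illustrative assumption, not part of Mesh TensorFlow)
# of the per-position computation DenseReluDense performs: project to d_ff,
# apply the activation, then project back to d_model. Dropout and the
# ensemble/expert dimensions are omitted for brevity.
def _dense_relu_dense_numpy_sketch(x, w_i, w_o):
  """x: [length, d_model], w_i: [d_model, d_ff], w_o: [d_ff, d_model]."""
  import numpy as np
  h = np.maximum(x @ w_i, 0.0)  # "wi" projection followed by ReLU
  return h @ w_o                # "wo" projection back to d_model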
def attention_params(context,
kv_dim,
num_heads,
num_memory_heads=0,
shared_kv=False,
no_query=False,
combine_dims=True,
keep_query_heads_dims=False,
fold_scaling_into_initializer=True):
"""Attention Parameters for Transformer Layers.
The num_heads argument indicates the number of read-heads.
For the familiar behavior described in "Attention Is All You Need", set
num_memory_heads=0.
If num_memory_heads==1, then there is only a single write-head, and multiple
read-heads. This leads to faster incremental decoding, since the
  recurrent state is smaller.
If num_memory_heads > 1, then num_memory_heads indicates the number of
write-heads. A fraction of the read-heads read each write-head.
num_memory_heads must divide num_heads. This behavior has not yet been tested.
  The no_query flag is set to True when we do not want to create parameters
  for the query projection (used by the Synthesizer model).
Args:
context: a transformer.Context
kv_dim: a dimension (for key and value channels)
num_heads: an integer
num_memory_heads: an optional integer
shared_kv: a boolean
no_query: a boolean
combine_dims: a boolean
keep_query_heads_dims: a boolean
fold_scaling_into_initializer: a boolean
Returns:
an attention.AttentionParams object
"""
if num_heads == 1:
query_heads_dims = None
memory_heads_dims = None
elif num_memory_heads == 0:
query_heads_dims = [mtf.Dimension("heads", num_heads)]
memory_heads_dims = query_heads_dims
elif num_memory_heads == 1:
query_heads_dims = [mtf.Dimension("heads", num_heads)]
memory_heads_dims = None
else:
if num_heads % num_memory_heads != 0:
raise ValueError("num_memory_heads must divide num_heads")
memory_heads_dims = [mtf.Dimension("heads", num_memory_heads)]
query_heads_dims = memory_heads_dims + [
mtf.Dimension("query_heads", num_heads // num_memory_heads)]
return attention.AttentionParams(
context.mesh,
query_input_dim=context.model.model_dim,
memory_input_dim=context.model.model_dim,
output_dim=context.model.model_dim,
key_dim=kv_dim,
value_dim=kv_dim,
query_heads_dims=query_heads_dims,
memory_heads_dims=memory_heads_dims,
variable_dtype=context.variable_dtype,
shared_kv=shared_kv,
no_query=no_query,
ensemble_dim=context.model.ensemble_dim,
combine_dims=combine_dims,
keep_query_heads_dims=keep_query_heads_dims,
fold_scaling_into_initializer=fold_scaling_into_initializer)
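# Worked example of the head-dimension bookkeeping above (illustrative only):
# with num_heads=8 and num_memory_heads=2, the keys/values get 2 write-heads
# while the queries get 8 read-heads arranged as 2 x 4:
#   memory_heads_dims = [Dimension("heads", 2)]
#   query_heads_dims  = [Dimension("heads", 2), Dimension("query_heads", 4)]
# so each group of 4 read-heads shares one write-head.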
@gin.configurable
class SelfAttention(transformer.TransformerLayer):
"""Multi-head self-attention layer."""
def __init__(self,
num_heads=8,
num_memory_heads=0,
key_value_size=128,
shared_kv=False,
dropout_rate=0.0,
attention_kwargs=None,
relative_attention_type=None,
relative_attention_num_buckets=32,
attention_func=None,
combine_dims=True,
keep_query_heads_dims=False,
fold_scaling_into_initializer=True,
z_loss_coeff=None,
use_hyperprompt=False,
hyperprompt_mtlshare=False,
hyperprompt_length_encoder=None,
hyperprompt_length_decoder=None,
hyperprompt_hidden_dim=None,
hyperprompt_task_num=8):
"""Create a SelfAttention Layer.
Args:
num_heads: an integer
num_memory_heads: an optional integer
key_value_size: an integer
shared_kv: a boolean
dropout_rate: a float
attention_kwargs: a dictionary of kwargs for attention.attention
relative_attention_type: an optional string - one of
(None, "bias", "bias_shared", "contextual")
relative_attention_num_buckets: an integer
attention_func: attention function: None/'hybrid'.
combine_dims: a boolean
keep_query_heads_dims: a boolean
fold_scaling_into_initializer: a boolean
z_loss_coeff: a float, if z_loss_coeff is not None then add an auxiliary
loss to push the attention logits closer to zero. This helps to
stabilize model training.
      use_hyperprompt: a boolean, whether to use a hypernetwork to enable
        information sharing among task-prompts. Otherwise, MTL-Prompt is enabled
        if either hyperprompt_length_encoder or hyperprompt_length_decoder is
        not None.
      hyperprompt_mtlshare: a boolean, whether to share MTL-Prompt projection
        networks among tasks when MTL-Prompt is activated. Otherwise, each task
        has its own projection network (MTL-Prompt-Sep).
hyperprompt_length_encoder: an integer, the length of task embeddings
prepended to the keys and values in encoder. If it is None, prompts are
not prepended in the encoder.
      hyperprompt_length_decoder: an integer, the length of task embeddings
prepended to the keys and values in decoder. If it is None, prompts are
not prepended in the decoder.
hyperprompt_hidden_dim: the bottleneck dimension in MLPs to generate
hyper-prompts.
hyperprompt_task_num: an integer, # of tasks in hyperprompt mode.
"""
self.num_heads = num_heads
self.num_memory_heads = num_memory_heads
self.key_value_size = key_value_size
self.shared_kv = shared_kv
self.dropout_rate = dropout_rate
self.attention_kwargs = attention_kwargs or {}
self.relative_attention_type = relative_attention_type
self.relative_attention_num_buckets = relative_attention_num_buckets
self.attention_func = attention_func
self.combine_dims = combine_dims
self.keep_query_heads_dims = keep_query_heads_dims
self.fold_scaling_into_initializer = fold_scaling_into_initializer
self.z_loss_coeff = z_loss_coeff
self.use_hyperprompt = use_hyperprompt
self.hyperprompt_mtlshare = hyperprompt_mtlshare
self.hyperprompt_length_encoder = hyperprompt_length_encoder
self.hyperprompt_length_decoder = hyperprompt_length_decoder
self.hyperprompt_hidden_dim = hyperprompt_hidden_dim
self.hyperprompt_task_num = hyperprompt_task_num
def layer_output_from_attention_output(self, context, attention_output,
losses):
return attention_output
def expected_attention_output_shape(self, x, params):
if self.keep_query_heads_dims:
return mtf.Shape(x.shape[:-1] + params.query_heads_dims + x.shape[-1:])
return x.shape
def attention_kwargs_from_context(self, context):
kwargs = copy.copy(self.attention_kwargs)
kwargs["dropout_rate"] = self.dropout_rate if context.train else 0.0
if "dropout_broadcast_dims" not in kwargs:
kwargs["dropout_broadcast_dims"] = [context.length_dim]
return kwargs
def make_params(self, context):
return attention_params(
context=context,
kv_dim=self.kv_dim,
num_heads=self.num_heads,
num_memory_heads=self.num_memory_heads,
shared_kv=self.shared_kv,
combine_dims=self.combine_dims,
keep_query_heads_dims=self.keep_query_heads_dims,
fold_scaling_into_initializer=self.fold_scaling_into_initializer)
def call(self, context, x, losses=None):
"""Call the layer."""
params = self.make_params(context)
q = params.compute_q(x)
memory_length = self.memory_length(context)
if context.mode == "incremental":
m = x
else:
m = mtf.replace_dimensions(x, context.length_dim, memory_length)
if self.shared_kv:
kv = params.compute_kv(m)
else:
k = params.compute_k(m)
v = params.compute_v(m)
if context.mode == "incremental":
one_hot = mtf.one_hot(
context.position, memory_length, dtype=context.activation_dtype)
inv_one_hot = 1.0 - one_hot
if self.shared_kv:
old_kv, = context.get_states(1)
kv = old_kv * inv_one_hot + kv * one_hot
else:
old_k, old_v = context.get_states(2)
k = old_k * inv_one_hot + k * one_hot
v = old_v * inv_one_hot + v * one_hot
memory_position = mtf.range(context.mesh, memory_length, tf.int32)
else:
memory_position = self.rename_length_to_memory_length(
context.position, context)
if context.mode == "incremental" or context.mode == "first_part":
context.record_new_states([kv] if self.shared_kv else [k, v])
if self.shared_kv:
k = kv
v = kv
# Inject hyper-prompts into k and v, skipped when prompt length is None.
scope_encoder_or_decoder = tf.get_variable_scope().name.split("/")[0]
use_prompt_kv = None
if self.hyperprompt_length_encoder and scope_encoder_or_decoder == "encoder":
k, v, memory_position, memory_length = attention.concat_hyper_prompts_kv(
k,
v,
scope_encoder_or_decoder,
self.use_hyperprompt,
memory_length,
self.hyperprompt_task_num,
self.num_heads,
self.hyperprompt_hidden_dim,
self.kv_dim,
context,
self.hyperprompt_mtlshare,
self.dropout_rate,
prompt_length=self.hyperprompt_length_encoder)
use_prompt_kv = "encoder_prompts"
if self.hyperprompt_length_decoder and scope_encoder_or_decoder == "decoder":
k, v, memory_position, memory_length = attention.concat_hyper_prompts_kv(
k,
v,
scope_encoder_or_decoder,
self.use_hyperprompt,
memory_length,
self.hyperprompt_task_num,
self.num_heads,
self.hyperprompt_hidden_dim,
self.kv_dim,
context,
self.hyperprompt_mtlshare,
self.dropout_rate,
prompt_length=self.hyperprompt_length_decoder)
use_prompt_kv = "decoder_prompts"
o = self.attention_fn(
q,
k,
v,
context=context,
memory_length_dim=memory_length,
key_dim=self.kv_dim,
value_dim=self.kv_dim,
bias=self.compute_bias(
context,
memory_position,
x,
params.query_heads_dims,
q,
use_prompt_kv=use_prompt_kv),
z_loss_coeff=self.z_loss_coeff,
**self.attention_kwargs_from_context(context))
attention_output_shape = self.expected_attention_output_shape(x, params)
attention_output = params.compute_output(
o, output_shape=attention_output_shape)
return self.layer_output_from_attention_output(context, attention_output,
losses)
def compute_bias(self,
context,
memory_position,
x,
heads_dims,
q,
use_prompt_kv=None):
"""Compute attention bias.
Args:
context: a transformer.Context
memory_position: an int32 tensor containing memory_length dimension.
x: a Tensor - the query antecedent - required for relative attention
heads_dims: a list of dimensions
q: a Tensor - the queries - required for contextual relative attention
use_prompt_kv: a string, "encoder_prompts" is to add prompts in encoder
"decoder_prompts" is to add prompt in decoder, which affects biases.
Returns:
a Tensor or None
"""
min_relative_position = self.min_relative_position(context) # pylint: disable=assignment-from-none
max_relative_position = self.max_relative_position(context) # pylint: disable=assignment-from-none
biases = []
relative_position = memory_position - context.position
if use_prompt_kv == "encoder_prompts":
relative_position -= self.hyperprompt_length_encoder
elif use_prompt_kv == "decoder_prompts":
relative_position -= self.hyperprompt_length_decoder
if min_relative_position is not None:
visible = mtf.greater_equal(relative_position, min_relative_position)
biases.append(attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype))
if max_relative_position is not None:
visible = mtf.less_equal(relative_position, max_relative_position)
biases.append(attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype))
if context.read_priority is not None:
if use_prompt_kv == "decoder_prompts":
prompt_length_dim = mtf.Dimension(context.length_dim.name,
self.hyperprompt_length_decoder)
write_priority_memory = mtf.ones(
x.mesh, shape=[prompt_length_dim], dtype=tf.int32) * -1
write_priority = mtf.concat(
[write_priority_memory, context.write_priority],
concat_dim_name=context.length_dim.name)
visible = mtf.greater_equal(
context.read_priority,
mtf.layers.rename_length_to_memory_length(write_priority))
else:
visible = mtf.greater_equal(
context.read_priority,
mtf.layers.rename_length_to_memory_length(context.write_priority))
biases.append(attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype))
sequence_id = None
# Subsequence id should only be set if we are in the decoder and have
# multiple targets per input. This will allow each sub-target to only attend
# to itself.
if isinstance(context.subsequence_id, mtf.Tensor):
sequence_id = context.subsequence_id
elif isinstance(context.sequence_id, mtf.Tensor):
sequence_id = context.sequence_id
if (sequence_id is not None and context.length_dim in sequence_id.shape):
if use_prompt_kv:
if use_prompt_kv == "decoder_prompts":
memory_length = mtf.Dimension(
"memory_length",
context.length_dim.size + self.hyperprompt_length_decoder)
elif use_prompt_kv == "encoder_prompts":
memory_length = mtf.Dimension(
"memory_length",
context.length_dim.size + self.hyperprompt_length_encoder)
memory_sequence_id = mtf.ones(
x.mesh, shape=[x.shape.dims[0], memory_length], dtype=tf.int32)
visible = mtf.equal(sequence_id, memory_sequence_id)
else:
visible = mtf.equal(
sequence_id,
self.rename_length_to_memory_length(sequence_id, context))
biases.append(attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype))
if self.relative_attention_type is not None:
buckets_dim = mtf.Dimension(
"buckets", self.relative_attention_num_buckets)
bidirectional = not context.model.fully_autoregressive
rp_bucket = _relative_position_bucket(
relative_position,
bidirectional=bidirectional,
num_buckets=buckets_dim.size)
if (self.relative_attention_type == "bias" or
self.relative_attention_type == "bias_shared"):
bias_shape = context.model.ensemble_dims + heads_dims + [buckets_dim]
values = None
cache = self.relative_attention_type == "bias_shared"
if cache:
cache_key = ("self_attention_bias",
min_relative_position,
max_relative_position,
tuple(heads_dims))
if cache_key in context.cache:
values = context.cache[cache_key]
if values is None:
values = mtf.get_variable(
context.mesh, "relative_attention_bias",
bias_shape, dtype=context.variable_dtype)
if cache:
context.cache[cache_key] = values
elif self.relative_attention_type == "contextual":
values = layers.dense(
q, reduced_dims=[self.kv_dim],
new_dims=[buckets_dim],
variable_dtype=context.variable_dtype,
name="relative_attention_ak",
use_bias=False,
expert_dims=context.model.ensemble_dims + heads_dims)
else:
raise ValueError("unrecognized relative_attention_type \"%s\"" %
self.relative_attention_type)
biases.append(mtf.gather(values, rp_bucket, buckets_dim))
return mtf.add_n(biases) if biases else None
@property
def kv_dim(self):
return mtf.Dimension("d_kv", self.key_value_size)
def memory_length(self, context):
return mtf.Dimension("memory_length", context.length_dim.size)
def rename_length_to_memory_length(self, x, context):
return mtf.replace_dimensions(
x, context.length_dim, self.memory_length(context))
def min_relative_position(self, context):
return None
def max_relative_position(self, context):
return None
@property
def attention_fn(self):
if self.attention_func == "hybrid":
return attention.hybrid_attention
else:
return attention.attention
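# A plain-NumPy sketch (an illustrative assumption, not part of Mesh TensorFlow)
# of the one-hot cache update used in "incremental" mode above: exactly one
# memory position of the cached keys is overwritten with the key computed for
# the current decoding step. The same pattern applies to the cached values.
def _incremental_kv_update_numpy_sketch(old_k, new_k_step, position):
  """old_k: [memory_length, heads, d_kv], new_k_step: [heads, d_kv]."""
  import numpy as np
  one_hot = np.zeros((old_k.shape[0], 1, 1))
  one_hot[position] = 1.0
  return old_k * (1.0 - one_hot) + new_k_step[None, :, :] * one_hot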
@gin.configurable
class ExpertsSelfAttention(SelfAttention):
"""Expert-layers for SelfAttention computations."""
def __init__(self,
num_experts=16,
loss_coef=1e-2,
group_size=1024,
capacity_factor_train=1.25,
capacity_factor_eval=2.0,
moe_gating="switch",
min_expert_capacity=4,
switch_policy_train="input_jitter",
switch_policy_eval="input_jitter",
switch_dropout=0.0,
switch_temperature=1.0,
switch_jitter=1e-2,
ntlb_top_k=4,
hidden_size=3072,
activation="relu",
z_loss=None,
expert_computation="qkv",
**kwargs):
super(ExpertsSelfAttention, self).__init__(**kwargs)
self.expert_computation = expert_computation
self._hparams = mtf.transformer.moe.HParams(
moe_gating=moe_gating,
num_experts=num_experts,
loss_coef=loss_coef,
group_size=group_size,
min_expert_capacity=min_expert_capacity,
capacity_factor_train=capacity_factor_train,
capacity_factor_eval=capacity_factor_eval,
switch_policy_train=switch_policy_train,
switch_policy_eval=switch_policy_eval,
switch_dropout=switch_dropout,
switch_temperature=switch_temperature,
switch_jitter=switch_jitter,
ntlb_top_k=ntlb_top_k,
hidden_size=hidden_size,
activation=activation,
z_loss=z_loss)
def make_params(self, context):
num_heads = self.num_heads
num_memory_heads = self.num_memory_heads
if num_heads == 1:
query_heads_dims = None
memory_heads_dims = None
elif num_memory_heads == 0:
query_heads_dims = [mtf.Dimension("heads", num_heads)]
memory_heads_dims = query_heads_dims
elif num_memory_heads == 1:
query_heads_dims = [mtf.Dimension("heads", num_heads)]
memory_heads_dims = None
else:
if num_heads % num_memory_heads != 0:
raise ValueError("num_memory_heads must divide num_heads")
memory_heads_dims = [mtf.Dimension("heads", num_memory_heads)]
query_heads_dims = memory_heads_dims + [
mtf.Dimension("query_heads", num_heads // num_memory_heads)]
return attention.ExpertsAttentionParams(
context.mesh,
query_input_dim=context.model.model_dim,
memory_input_dim=context.model.model_dim,
output_dim=context.model.model_dim,
key_dim=self.kv_dim,
value_dim=self.kv_dim,
query_heads_dims=query_heads_dims,
memory_heads_dims=memory_heads_dims,
variable_dtype=context.variable_dtype,
shared_kv=self.shared_kv,
no_query=False,
ensemble_dim=context.model.ensemble_dim,
combine_dims=self.combine_dims,
keep_query_heads_dims=self.keep_query_heads_dims,
fold_scaling_into_initializer=self.fold_scaling_into_initializer,
context=context,
experts_hparams=self._hparams,
expert_computation=self.expert_computation)
@gin.configurable
class ExpertsEncDecAttention(ExpertsSelfAttention):
"""Expert-layers for EncDecAttention computations."""
def __init__(self, relative_attention_type=None, **kwargs):
super(ExpertsEncDecAttention, self).__init__(
relative_attention_type=relative_attention_type, **kwargs)
def _get_memory_antecedent(self, context):
return context.encoder_output
def call(self, context, x, losses=None):
"""Call the layer."""
return enc_dec_attention(self, self._get_memory_antecedent(context),
context, x, losses)
@gin.configurable
class Synthesizer(SelfAttention):
"""Multi-head Synthesizer layer https://arxiv.org/abs/2005.00743."""
def __init__(self,
num_heads=8,
num_memory_heads=0,
key_value_size=128,
shared_kv=False,
dropout_rate=0.0,
attention_kwargs=None,
relative_attention_type=None,
relative_attention_num_buckets=32,
attention_func=None,
combine_dims=True,
keep_query_heads_dims=False,
synthesize_mode="random_plus_alpha",
fold_scaling_into_initializer=True,
**kwargs):
"""Create a Synthesizer Layer.
Args:
num_heads: an integer
num_memory_heads: an optional integer
key_value_size: an integer
shared_kv: a boolean
dropout_rate: a float
attention_kwargs: a dictionary of kwargs for attention.attention
relative_attention_type: an optional string - one of
(None, "bias", "bias_shared", "contextual")
relative_attention_num_buckets: an integer
attention_func: attention function: None/'hybrid'.
combine_dims: a boolean
keep_query_heads_dims: a boolean
synthesize_mode: a string to select synthesizer variant
fold_scaling_into_initializer: a boolean
**kwargs: additional constructor params
"""
super(Synthesizer, self).__init__(**kwargs)
self.num_heads = num_heads
self.num_memory_heads = num_memory_heads
self.key_value_size = key_value_size
self.shared_kv = shared_kv
self.dropout_rate = dropout_rate
self.attention_kwargs = attention_kwargs or {}
self.relative_attention_type = relative_attention_type
self.relative_attention_num_buckets = relative_attention_num_buckets
self.attention_func = attention_func
self.combine_dims = combine_dims
self.keep_query_heads_dims = keep_query_heads_dims
self.synthesize_mode = synthesize_mode
self.fold_scaling_into_initializer = fold_scaling_into_initializer
self.no_query = False
if "plus" in self.synthesize_mode:
self.shared_kv = False
self.no_query = False
elif "minus" in self.synthesize_mode:
# We still keep the query as first projection
self.shared_kv = True
self.no_query = False
else:
self.shared_kv = True
self.no_query = True
def make_params(self, context):
return attention_params(
context=context,
kv_dim=self.kv_dim,
num_heads=self.num_heads,
num_memory_heads=self.num_memory_heads,
shared_kv=self.shared_kv,
no_query=self.no_query,
fold_scaling_into_initializer=self.fold_scaling_into_initializer)
def call(self, context, x, losses=None):
"""Call the layer."""
params = self.make_params(context)
memory_length = self.memory_length(context)
if context.mode == "incremental":
m = x
else:
m = mtf.replace_dimensions(x, context.length_dim, memory_length)
if self.shared_kv:
kv = params.compute_kv(m)
else:
k = params.compute_k(m)
v = params.compute_v(m)
if self.no_query:
# we don't use q for some synthesizer modes that don't use QKV at all.
q = x
else:
q = params.compute_q(x)
if self.shared_kv:
k = kv
v = kv
if context.mode == "incremental":
one_hot = mtf.one_hot(
context.position, memory_length, dtype=context.activation_dtype)
inv_one_hot = 1.0 - one_hot
old_k, old_v = context.get_states(2)
k = old_k * inv_one_hot + k * one_hot
v = old_v * inv_one_hot + v * one_hot
memory_position = mtf.range(context.mesh, memory_length, tf.int32)
else:
memory_position = self.rename_length_to_memory_length(
context.position, context)
if context.mode == "incremental" or context.mode == "first_part":
context.record_new_states([k, v])
o = attention.synthetic_attention(q, k, v, memory_length,
self.kv_dim, self.kv_dim,
self.compute_bias(context,
memory_position,
x,
params.query_heads_dims,
q),
synthesize=True,
synthesize_mode=self.synthesize_mode,
context=context,
**self.attention_kwargs_from_context(
context))
attention_output_shape = self.expected_attention_output_shape(x, params)
attention_output = params.compute_output(
o, output_shape=attention_output_shape)
return self.layer_output_from_attention_output(context, attention_output,
losses)
@gin.configurable
def relative_position_spans(context, num_sentinels=gin.REQUIRED):
"""Compute relative positions between inputs and targets.
Used by enc_dec_attention_bias.
Assumes that inputs and targets were generated by a span-filling objective:
The inputs consist of the original text with some spans removed and replaced
by single sentinels.
The targets consist of the dropped spans, each preceded by a single sentinel.
Sentinels are the last tokens in the vocabulary.
e.g.
inputs: A B C <S> F G H <S>
shifted-targets: <BOS> <S> D E <S> I J K
Relative positions are computed by identifying a target token with the
corresponding sentinel in the input and returning the distance between these
two tokens in the input.
Target tokens which precede all sentinels get identified with the beginning of
the input. So if we apply this to a problem with no sentinels, all target
  tokens will be identified with the beginning of the input. We assume this is
the case during incremental decoding, so this code will not work properly to
incrementally decode a problem with sentinels. This may not be an issue,
since the span-filling objective is primarily used for unsupervised
pre-training.
Args:
context: a Context
num_sentinels: an integer. Should have the same value as
SentencePieceVocabulary.extra_ids
Returns:
a Tensor
"""
decoder_id = context.inputs
encoder_id = context.encoder_inputs
decoder_length = context.length_dim
encoder_length = context.encoder_length_dim
mesh = encoder_id.mesh
encoder_pos = mtf.range(mesh, encoder_length, tf.int32)
if decoder_length not in decoder_id.shape.dims:
# we are doing incremental decoding.
# Map the target token to the beginning of the input.
dec_to_enc_pos = 0
else:
vocab_size = context.model.input_vocab_size_unpadded
def sentinel_mask(t):
return mtf.cast(mtf.greater_equal(
t, vocab_size - num_sentinels), tf.int32)
decoder_is_sentinel = sentinel_mask(decoder_id)
encoder_is_sentinel = sentinel_mask(encoder_id)
encoder_segment_id = mtf.cumsum(encoder_is_sentinel, encoder_length)
decoder_segment_id = mtf.cumsum(decoder_is_sentinel, decoder_length)
encoder_sequence_id = context.encoder_sequence_id
decoder_sequence_id = context.sequence_id
if encoder_sequence_id is not None:
# distinguish segments from different sequences
multiplier = max(encoder_length.size, decoder_length.size)
encoder_segment_id += encoder_sequence_id * multiplier
decoder_segment_id += decoder_sequence_id * multiplier
dec_to_enc_pos = mtf.reduce_sum(
mtf.cast(mtf.less(encoder_segment_id, decoder_segment_id), tf.int32),
reduced_dim=encoder_length)
return dec_to_enc_pos - encoder_pos
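# A plain-NumPy sketch (an illustrative assumption, not the mtf code path) of
# the span-alignment logic above for a single unpacked sequence: sentinel
# tokens define segment ids, each decoder token is mapped to the encoder
# position of its sentinel, and relative positions are measured from there.
def _relative_position_spans_numpy_sketch(encoder_ids, decoder_ids, vocab_size,
                                          num_sentinels):
  """Returns a [decoder_length, encoder_length] matrix of relative positions."""
  import numpy as np
  encoder_ids = np.asarray(encoder_ids)
  decoder_ids = np.asarray(decoder_ids)
  def sentinel_mask(t):
    return (t >= vocab_size - num_sentinels).astype(np.int32)
  enc_seg = np.cumsum(sentinel_mask(encoder_ids))  # segment id per encoder token
  dec_seg = np.cumsum(sentinel_mask(decoder_ids))  # segment id per decoder token
  # Count encoder tokens whose segment id is strictly smaller: this is the
  # encoder position identified with each decoder token.
  dec_to_enc = (enc_seg[None, :] < dec_seg[:, None]).sum(axis=1)
  enc_pos = np.arange(len(encoder_ids))
  return dec_to_enc[:, None] - enc_pos[None, :]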
@gin.configurable
def enc_dec_attention_bias(layer,
context,
heads_dims,
relative_position_fn=relative_position_spans):
"""Compute bias term for encoder-decoder attention.
Args:
layer: a TransformerLayer
context: a Context
heads_dims: a list of Dimension
relative_position_fn: an optional function
Returns:
a Tensor
"""
biases = []
if context.encoder_sequence_id and context.sequence_id:
visible = mtf.equal(context.sequence_id, context.encoder_sequence_id)
biases.append(attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype))
if (layer.relative_attention_type == "bias" or
layer.relative_attention_type == "bias_shared"):
buckets_dim = mtf.Dimension(
"buckets", layer.relative_attention_num_buckets)
bias_shape = context.model.ensemble_dims + heads_dims + [buckets_dim]
values = None
cache = layer.relative_attention_type == "bias_shared"
if cache:
cache_key = ("enc_dec_relative_attention_bias", tuple(heads_dims))
if cache_key in context.cache:
values = context.cache[cache_key]
if values is None:
values = mtf.get_variable(
context.mesh, "enc_dec_relative_attention_bias",
bias_shape, dtype=context.variable_dtype)
if cache:
context.cache[cache_key] = values
rel_pos = relative_position_fn(context)
rp_bucket = _relative_position_bucket(
rel_pos,
bidirectional=True,
num_buckets=buckets_dim.size)
biases.append(mtf.gather(values, rp_bucket, buckets_dim))
elif layer.relative_attention_type is not None:
raise ValueError("unrecognized relative_attention_type \"%s\"" %
layer.relative_attention_type)
return mtf.add_n(biases) if biases else None
@gin.configurable
def enc_dec_attention(self_attention_layer, memory_antecedent, context, x,
losses, attention_fn=attention.attention,
z_loss_coeff=None):
"""Multi-head attention over the encoder outputs."""
memory_input_dim = memory_antecedent.shape[-1]
if memory_input_dim != context.model.model_dim:
raise NotImplementedError(
"TODO(noam): support different model_dim in encoder and decoder.")
params = self_attention_layer.make_params(context)
q = params.compute_q(x)
if context.mode == "incremental":
k, v, memory_length = context.get_constant_state()
else:
m = memory_antecedent
if self_attention_layer.shared_kv:
kv = params.compute_kv(m)
k = kv
v = kv
else:
k = params.compute_k(m)
v = params.compute_v(m)
memory_length, = [d for d in m.shape.dims if d.name == "memory_length"]
if context.mode == "first_part":
context.record_constant_state((k, v, memory_length))
bias = enc_dec_attention_bias(self_attention_layer,
context,
params.query_heads_dims)
a = attention_fn(
q, k, v, memory_length, self_attention_layer.kv_dim,
self_attention_layer.kv_dim, bias,
context=context,
z_loss_coeff=z_loss_coeff,
**self_attention_layer.attention_kwargs_from_context(context))
attention_output_shape = self_attention_layer.expected_attention_output_shape(
x, params)
attention_output = params.compute_output(
a, output_shape=attention_output_shape)
return self_attention_layer.layer_output_from_attention_output(
context, attention_output, losses)
@gin.configurable
class EncDecAttention(SelfAttention):
"""Multi-head attention over encoder output."""
def __init__(self, relative_attention_type=None, **kwargs):
super(EncDecAttention, self).__init__(
relative_attention_type=relative_attention_type, **kwargs)
def _get_memory_antecedent(self, context):
return context.encoder_output
def call(self, context, x, losses=None):
"""Call the layer."""
return enc_dec_attention(self, self._get_memory_antecedent(context),
context, x, losses,
attention_fn=self.attention_fn,
z_loss_coeff=self.z_loss_coeff)
@property
def attention_fn(self):
return attention.attention
@gin.configurable
class TransparentEncDecAttention(EncDecAttention):
"""Transparent multi-head attention over encoder output."""
def __init__(self,
layers_per_encoder_module=gin.REQUIRED,
layers_per_decoder_module=gin.REQUIRED,
encoder_num_modules=gin.REQUIRED,
decoder_num_modules=gin.REQUIRED,
dropout_rate=0.0,
**kwargs):
"""Create a transparent attention EncDec Layer.
Args:
      layers_per_encoder_module: positive integer telling how many layers are in
        each repeated module in the encoder
      layers_per_decoder_module: positive integer telling how many layers are in
        each repeated module in the decoder
encoder_num_modules: positive integer of how many repeated modules there
are in the encoder
decoder_num_modules: positive integer of how many repeated modules there
are in the decoder
dropout_rate: positive float, the dropout rate for the matrix relating
encoder outputs to decoder inputs
**kwargs: additional constructor params
"""
super(TransparentEncDecAttention, self).__init__(**kwargs)
self.layers_per_encoder_module = layers_per_encoder_module
self.layers_per_decoder_module = layers_per_decoder_module
self.encoder_num_modules = encoder_num_modules
self.decoder_num_modules = decoder_num_modules
self.dropout_rate = dropout_rate
def _get_memory_antecedent(self, context):
decoder_module_index = context.layer_index // self.layers_per_decoder_module
decoder_inputs = self._get_decoder_inputs(context)
return decoder_inputs[decoder_module_index]
def _get_decoder_inputs(self, context):
"""Computes the inputs to the decoder when using transparent attention.
We must cache on the context in order to ensure that we are not replicating
variables when the layer's call function is called in different tf variable
scopes.
Args:
context: a Context
Returns:
a list containing `self.num_decoder_modules` of tensors with shape
[<batch_dims>, length_dim, output_vocab_dim]
"""
if hasattr(context, "decoder_layers_per_module"):
return context.decoder_layers_per_module
encoder_layer_outputs = [
mtf.layers.rename_length_to_memory_length(output)
for output in context.encoder_layer_outputs
]
layers_per_module = self.layers_per_encoder_module
encoder_module_outputs_dim = mtf.Dimension(
"encoder_module_outputs", size=self.encoder_num_modules + 1)
decoder_module_inputs_dim = mtf.Dimension(
"decoder_module_inputs", size=self.decoder_num_modules)
encoder_module_outputs = mtf.stack(
[encoder_layer_outputs[0]] +
encoder_layer_outputs[layers_per_module::layers_per_module],
dim_name="encoder_module_outputs")
stddev = 1.0
if not mtf.layers.unit_scaling_convention():
stddev *= encoder_module_outputs_dim.size ** -0.5
w = mtf.get_variable(
context.mesh,
"w",
mtf.Shape([encoder_module_outputs_dim, decoder_module_inputs_dim]),
initializer=tf.random_normal_initializer(stddev=stddev),
dtype=context.variable_dtype)
w = mtf.dropout(w, context.train, 1.0 - self.dropout_rate)
s = mtf.softmax(w, reduced_dim=encoder_module_outputs_dim)
z = mtf.layers.us_einsum([s, encoder_module_outputs],
reduced_dims=[encoder_module_outputs_dim])
input_per_decoder = mtf.split(
z,
split_dim=decoder_module_inputs_dim,
num_or_size_splits=decoder_module_inputs_dim.size)
context.decoder_layers_per_module = [
mtf.reshape(inpt, z.shape.dims[1:]) for inpt in input_per_decoder
]
return context.decoder_layers_per_module
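# A plain-NumPy sketch (an illustrative assumption, not part of Mesh TensorFlow)
# of the transparent-attention mixing above: each decoder module attends to a
# learned softmax-weighted combination of the encoder module outputs.
def _transparent_attention_numpy_sketch(encoder_module_outputs, w):
  """encoder_module_outputs: [num_encoder_modules + 1, memory_length, d_model],
  w: [num_encoder_modules + 1, num_decoder_modules]."""
  import numpy as np
  s = np.exp(w - w.max(axis=0, keepdims=True))
  s = s / s.sum(axis=0, keepdims=True)          # softmax over encoder modules
  return np.einsum("en,eld->nld", s, encoder_module_outputs)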
@gin.configurable
class LocalSelfAttention(SelfAttention):
"""Multi-head local self-attention layer."""
def __init__(self,
radius=128,
num_heads=8,
num_memory_heads=0,
key_value_size=128,
shared_kv=False,
dropout_rate=0.0,
attention_kwargs=None,):
super(LocalSelfAttention, self).__init__(
num_heads,
num_memory_heads,
key_value_size,
shared_kv,
dropout_rate,
attention_kwargs)
self.radius = radius
def call(self, context, x, losses=None):
"""Call the layer."""
params = self.make_params(context)
q = params.compute_q(x)
if self.shared_kv:
kv = params.compute_kv(x)
k = kv
v = kv
else:
k = params.compute_k(x)
v = params.compute_v(x)
if context.mode == "incremental":
if self.shared_kv:
prev_kv, = context.get_states(1)
else:
prev_k, prev_v = context.get_states(2)
current_position = mtf.equal(
mtf.range(context.mesh, self.window_dim, dtype=tf.int32),
mtf.mod(context.position, self.radius))
if self.shared_kv:
kv = mtf.where(current_position, kv, prev_kv,
output_shape=prev_kv.shape)
k = kv
v = kv
context.record_new_states([kv])
else:
k = mtf.where(current_position, params.compute_k(x), prev_k,
output_shape=prev_k.shape)
v = mtf.where(current_position, params.compute_v(x), prev_v,
output_shape=prev_v.shape)
context.record_new_states([k, v])
window_pos = mtf.range(context.mesh, self.window_dim, tf.int32)
visible = mtf.greater_equal(context.position, window_pos)
bias = attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype)
o = attention.attention(
q,
k,
v,
self.window_dim,
self.kv_dim,
self.kv_dim,
bias,
**self.attention_kwargs_from_context(context))
elif context.length_dim.size <= max(256, self.radius * 4):
# nothing fancy - just do full attention and mask
memory_length = self.rename_length_to_memory_length(
context.position, context)
o = attention.attention(
q, self.rename_length_to_memory_length(k, context),
self.rename_length_to_memory_length(v, context),
self.memory_length(context), self.kv_dim, self.kv_dim,
self.compute_bias(context, memory_length, x, params.query_heads_dims,
q), **self.attention_kwargs_from_context(context))
else:
# fancy local attention algorithm
o = attention.local_attention_1d(
q=q,
k=k,
v=None if self.shared_kv else v,
length_dim=context.length_dim,
key_dim=self.kv_dim,
value_dim=self.kv_dim,
length_dim_num_splits=1, # TODO(noam): look at the layout
autoregressive=context.model.fully_autoregressive,
radius=self.radius,
sequence_id=context.sequence_id,
write_priority=context.write_priority,
read_priority=context.read_priority,
attention_kwargs=self.attention_kwargs_from_context(context))
if context.mode == "first_part":
window_pos = mtf.range(context.mesh, self.window_dim, tf.int32)
pos = mtf.range(context.mesh, context.length_dim, tf.int32)
select_recent = mtf.cast(
mtf.equal(mtf.mod(pos, self.radius), window_pos), x.dtype)
select_recent *= mtf.cast(
mtf.less(pos, context.initial_position), x.dtype)
select_recent *= mtf.cast(
mtf.greater_equal(
pos, context.initial_position - self.radius), x.dtype)
state_shape = (k.shape - [context.length_dim, self.kv_dim]
+ [self.window_dim, self.kv_dim])
k_state = mtf.einsum(
[k, select_recent], output_shape=state_shape,
reduced_dims=[context.length_dim])
context.new_states.append(k_state)
if not self.shared_kv:
v_state = mtf.einsum(
[v, select_recent], output_shape=state_shape,
reduced_dims=[context.length_dim])
context.new_states.append(v_state)
return params.compute_output(o, output_shape=x.shape)
def min_relative_position(self, context):
return 1 - self.radius
def max_relative_position(self, context):
return None if context.model.fully_autoregressive else self.radius
@property
def window_dim(self):
return mtf.Dimension("window", self.radius)
def _relative_position_bucket(relative_position,
bidirectional=True,
num_buckets=32,
max_distance=128):
"""Translate relative position to a bucket number for relative attention.
The relative position is defined as memory_position - query_position, i.e.
the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are
invalid.
We use smaller buckets for small absolute relative_position and larger buckets
for larger absolute relative_positions. All relative positions >=max_distance
map to the same bucket. All relative positions <=-max_distance map to the
same bucket. This should allow for more graceful generalization to longer
sequences than the model has been trained on.
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32
values in the range [0, num_buckets)
"""
ret = 0
n = -relative_position
if bidirectional:
num_buckets //= 2
ret += mtf.to_int32(mtf.less(n, 0)) * num_buckets
n = mtf.abs(n)
else:
n = mtf.maximum(n, 0)
# now n is in the range [0, inf)
max_exact = num_buckets // 2
is_small = mtf.less(n, max_exact)
val_if_large = max_exact + mtf.to_int32(
mtf.log(mtf.to_float(n) / max_exact)
/ math.log(max_distance / max_exact) * (num_buckets - max_exact))
val_if_large = mtf.minimum(val_if_large, num_buckets - 1)
ret += mtf.where(is_small, n, val_if_large)
return ret
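# A plain-NumPy sketch (an illustrative assumption) mirroring the bucketing
# above, useful for inspecting bucket assignments outside of a mesh graph.
# With the defaults (bidirectional, 32 buckets), relative_position = -1 maps
# to bucket 1 and relative_position = +1 maps to bucket 17.
def _relative_position_bucket_numpy_sketch(relative_position, bidirectional=True,
                                           num_buckets=32, max_distance=128):
  import numpy as np
  n = -np.asarray(relative_position)
  ret = np.zeros_like(n)
  if bidirectional:
    num_buckets //= 2
    ret += (n < 0).astype(np.int32) * num_buckets
    n = np.abs(n)
  else:
    n = np.maximum(n, 0)
  max_exact = num_buckets // 2
  is_small = n < max_exact
  # Clamp the argument of the log to avoid log(0); those entries take the
  # is_small branch anyway.
  val_if_large = max_exact + (
      np.log(np.maximum(n, 1) / max_exact)
      / math.log(max_distance / max_exact) * (num_buckets - max_exact)
  ).astype(np.int32)
  val_if_large = np.minimum(val_if_large, num_buckets - 1)
  return ret + np.where(is_small, n, val_if_large)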
@gin.configurable
class TalkingHeadsSelfAttention(SelfAttention):
"""Experimental Talking-heads self-attention layer.
https://arxiv.org/abs/2003.02436
This is a variant where there are (optionally) extra learned linear
projections on the attention logits and attention weights. These linear
projections are across attention heads (but not across different query or
memory positions).
The user specifies three sets of mtf.Dimension:
    key_heads_dims: "heads" dimensions for the queries, keys and their dot-product
softmax_heads_dims: "heads" dimensions for the logits and their softmax
value_heads_dims: "heads" dimensions for the values
If these three sets are identical, then this layer is identical to ordinary
multi-head attention.
If key_heads_dims != softmax_heads_dims, then a learned linear projection
is applied to compute the logits. This projection reduces out dimensions
in (key_heads_dims-softmax_heads_dims) and inserts dimensions in
(softmax_heads_dims-key_heads_dims).
If softmax_heads_dims != value_heads_dims, then a learned linear
projection is applied to the weights (the output of the softmax). This
projection reduces out dimensions in (softmax_heads_dims-value_heads_dims)
and inserts dimensions in (value_heads_dims-softmax_heads_dims).
TPU performance is lousy due to small matrix sizes.
Early experiments show that quality can be significantly better than baseline.
An additional supported option is dynamic talking-heads projections where the
talking-heads projections themselves contain terms that depend on the inputs.
Each of the logits-projection and the weights-projection can depend on either
or both of the query-antecedent X or the memory-antecedent Y. This gives
a total of four dynamic projections which can be enabled individually.
To enable, set the dynamic_projections argument to a list containing a
some or all of the strings ["x2l", "m2l", "x2w", "m2w"].
Example:
TalkingHeadsSelfAttention.key_heads_dims = [("key_heads", 12)]
TalkingHeadsSelfAttention.softmax_heads_dims = [("heads", 32)]
TalkingHeadsSelfAttention.value_heads_dims = [("value_heads", 12)]
TalkingHeadsSelfAttention.key_size = 64
TalkingHeadsSelfAttention.value_size = 64
d_model = 1024
We start with an input x
x: [length, d_model]
The input is first transformed into queries, keys and values:
queries: [query_length, key_heads, key_size]
keys: [memory_length, key_heads, key_size]
values: [memory_length, value_heads, value_size]
queries and keys get einsummed to produce a tensor p:
p: [query_length, memory_length, key_heads]
p gets linearly transformed with a learned weight matrix with shape
[key_heads, softmax_heads] to produce logits
logits: [query_length, memory_length, softmax_heads]
  take the softmax of logits (across memory_length) to produce h
h: [query_length, memory_length, softmax_heads]
Now a learned linear projection with shape [softmax_heads, value_heads]
on h produces the weights.
weights: [query_length, memory_length, value_heads]
As usual, we einsum the weights with the values.
o: [query_length, value_heads, value_size]
Finally, project o back to the desired output dimension
y: [query_length, d_model]
Also, this doesn't model-parallelize trivially. To model-parallelize, you
should add one heads-dimension that is present in all of key_heads_dims,
softmax_heads_dims, value_heads_dims. Call this dimension "heads" and shard
that over multiple devices. Then also include additional different
heads-dimension for the keys, softmax, and values.
"""
def __init__(self, # pylint: disable=super-init-not-called
key_heads_dims=(("heads", 12),),
softmax_heads_dims=(("heads", 12),),
value_heads_dims=(("heads", 12),),
key_size=64,
value_size=64,
dropout_rate=0.0,
relative_attention_type=None,
relative_attention_num_buckets=32,
dynamic_projections=None,
dynamic_projections_init_scale=1e-2):
"""Create a SelfAttention Layer.
Args:
key_heads_dims: a list of mtf.Dimension or (name, size) pairs
softmax_heads_dims: a list of mtf.Dimension or (name, size) pairs
value_heads_dims: a list of mtf.Dimension or (name, size) pairs
key_size: an integer
value_size: an integer
dropout_rate: a float
relative_attention_type: an optional string - one of
(None, "bias", "bias_shared", "contextual")
relative_attention_num_buckets: an integer
dynamic_projections: an optional sequence containing a subset of
["x2l", "m2l", "x2w", "m2w"] (see class comments)
dynamic_projections_init_scale: a float - initializer variance scaling
factor for these dynamic projections. We have observed learning
difficulties when this value is too large.
"""
self.key_heads_dims = [mtf.convert_to_dimension(d) for d in key_heads_dims]
self.softmax_heads_dims = [
mtf.convert_to_dimension(d) for d in softmax_heads_dims]
self.value_heads_dims = [
mtf.convert_to_dimension(d) for d in value_heads_dims]
self.key_dim = mtf.Dimension("d_k", key_size)
self.value_dim = mtf.Dimension("d_v", value_size)
self.dropout_rate = dropout_rate
self.relative_attention_type = relative_attention_type
self.relative_attention_num_buckets = relative_attention_num_buckets
self.dynamic_projections = dynamic_projections or []
self.dynamic_projections_init_scale = dynamic_projections_init_scale
def compute_q(self, context, x):
# Scale the initializer variance by 1.0/d_k
# This scales the initializer by rsqrt(d_k)
init_scale = 1.0
if not mtf.layers.unit_scaling_convention():
init_scale /= self.key_dim.size
kernel_initializer = mtf.layers.VarianceScalingInitializer(init_scale)
return mtf.layers.dense(
x, reduced_dims=[context.model.model_dim],
new_dims=self.key_heads_dims + [self.key_dim],
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="q", expert_dims=context.model.ensemble_dims,
kernel_initializer=kernel_initializer)
def compute_k(self, context, x):
return mtf.layers.dense(
x, reduced_dims=[context.model.model_dim],
new_dims=self.key_heads_dims + [self.key_dim],
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="k", expert_dims=context.model.ensemble_dims)
def compute_v(self, context, x):
return mtf.layers.dense(
x, reduced_dims=[context.model.model_dim],
new_dims=self.value_heads_dims + [self.value_dim],
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="v", expert_dims=context.model.ensemble_dims)
def compute_y(self, context, u):
return mtf.layers.dense(
u, reduced_dims=self.value_heads_dims + [self.value_dim],
new_dims=[context.model.model_dim],
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="y", expert_dims=context.model.ensemble_dims)
def call(self, context, x, losses=None):
"""Call the layer."""
memory_length = self.memory_length(context)
q = self.compute_q(context, x)
if context.mode == "incremental":
m = x
else:
m = mtf.replace_dimensions(x, context.length_dim, memory_length)
k = self.compute_k(context, m)
v = self.compute_v(context, m)
if context.mode == "incremental":
one_hot = mtf.one_hot(
context.position, memory_length, dtype=context.activation_dtype)
inv_one_hot = 1.0 - one_hot
old_k, old_v = context.get_states(2)
k = old_k * inv_one_hot + k * one_hot
v = old_v * inv_one_hot + v * one_hot
memory_position = mtf.range(context.mesh, memory_length, tf.int32)
else:
memory_position = self.rename_length_to_memory_length(
context.position, context)
if context.mode == "incremental" or context.mode == "first_part":
context.record_new_states([k, v])
bias = self.compute_bias(context, memory_position, x,
self.softmax_heads_dims, q)
return self.attention_internal(context, x, m, q, k, v, memory_length, bias)
def attention_internal(self, context, x, m, q, k, v, memory_length, bias):
p = mtf.layers.us_einsum([q, k], reduced_dims=[self.key_dim])
logits = self.talking_heads(
context, p, "logits", self.key_heads_dims, self.softmax_heads_dims,
dynamic_projections_from=(
([x] if "x2l" in self.dynamic_projections else []) +
([m] if "m2l" in self.dynamic_projections else [])))
if bias is not None:
logits += bias
h = mtf.softmax(logits, memory_length)
weights = self.talking_heads(
context, h, "weights", self.softmax_heads_dims, self.value_heads_dims,
dynamic_projections_from=(
([x] if "x2w" in self.dynamic_projections else []) +
([m] if "m2w" in self.dynamic_projections else [])))
# TODO(noam): make dropout_broadcast_dims configurable
dropout_broadcast_dims = [context.length_dim]
weights = mtf.dropout(
weights, context.train, rate=self.dropout_rate,
noise_shape=weights.shape - dropout_broadcast_dims)
u = mtf.einsum([weights, v], reduced_dims=[memory_length])
return self.compute_y(context, u)
def talking_heads(
self, context, inp, name, input_heads_dims, output_heads_dims,
dynamic_projections_from=None):
shared_dims = [d for d in input_heads_dims if d in output_heads_dims]
reduced_dims = [d for d in input_heads_dims if d not in output_heads_dims]
new_dims = [d for d in output_heads_dims if d not in input_heads_dims]
if not (reduced_dims or new_dims):
# Output dimensions are same as input dimensions. Return the input
return inp
elif dynamic_projections_from:
# There are one or more dynamic talking-heads-projections
with tf.variable_scope(name):
# static projection - this is the same as the static projection in the
# "else" case below. We create the weight matrix with get_variable
# instead of calling mtf.layers.dense() so that we can fold the
# static projection into one of the dynamic projections.
static_p_initializer = mtf.layers.VarianceScalingInitializer()(
reduced_dims, new_dims)
static_p_shape = (
context.model.ensemble_dims + shared_dims + reduced_dims + new_dims)
static_p = mtf.get_variable(inp.mesh,
"kernel",
static_p_shape,
initializer=static_p_initializer,
dtype=context.variable_dtype)
ps = []
for i, dp_from in enumerate(dynamic_projections_from):
init_scale = self.dynamic_projections_init_scale
if not mtf.layers.unit_scaling_convention():
init_scale /= mtf.Shape(reduced_dims).size
kernel_initializer = mtf.layers.VarianceScalingInitializer(
init_scale)
ps.append(
mtf.layers.dense(
dp_from, reduced_dims=[context.model.model_dim],
new_dims=shared_dims + reduced_dims + new_dims,
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="%s_dynamic_%d" % (name, i),
expert_dims=context.model.ensemble_dims,
kernel_initializer=kernel_initializer))
        # Fold the static projection into one of the dynamic projections.
# Mathematically, we could add all the dynamic projections together
# here, but it would create a very large tensor which contained
# both the query-length and memory-length dimensions, and would
# probably be slower in practice.
ps[0] += static_p
return mtf.add_n(
[mtf.layers.us_einsum([inp, p], reduced_dims=reduced_dims)
for p in ps])
else:
# No dynamic projections. Static talking-heads projection only
return mtf.layers.dense(
inp, reduced_dims=reduced_dims,
new_dims=new_dims,
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name=name, expert_dims=context.model.ensemble_dims + shared_dims)
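# A plain-NumPy sketch (an illustrative assumption, not part of Mesh TensorFlow)
# of the shape bookkeeping described in the class comment, for a single example
# with static talking-heads projections only.
def _talking_heads_numpy_sketch(q, k, v, p_logits, p_weights):
  """q: [query_length, key_heads, key_size],
  k: [memory_length, key_heads, key_size],
  v: [memory_length, value_heads, value_size],
  p_logits: [key_heads, softmax_heads], p_weights: [softmax_heads, value_heads].
  """
  import numpy as np
  p = np.einsum("qhd,mhd->qmh", q, k)               # dot products per key head
  logits = np.einsum("qmh,hs->qms", p, p_logits)    # project across heads
  h = np.exp(logits - logits.max(axis=1, keepdims=True))
  h = h / h.sum(axis=1, keepdims=True)              # softmax over memory_length
  weights = np.einsum("qms,sv->qmv", h, p_weights)  # project across heads
  return np.einsum("qmv,mvd->qvd", weights, v)      # [query_length, value_heads, value_size]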
@gin.configurable
class TalkingHeadsEncDecAttention(TalkingHeadsSelfAttention):
"""Talking-heads attention over encoder output.
See comments on TalkingHeadsSelfAttention.
"""
def __init__(self, relative_attention_type=None, **kwargs):
super(TalkingHeadsEncDecAttention, self).__init__(
relative_attention_type=relative_attention_type, **kwargs)
def _get_memory_antecedent(self, context):
return context.encoder_output
def call(self, context, x, losses=None):
"""Call the layer."""
m = self._get_memory_antecedent(context)
memory_input_dim = m.shape[-1]
if memory_input_dim != context.model.model_dim:
raise NotImplementedError(
"TODO(noam): support different model_dim in encoder and decoder.")
q = self.compute_q(context, x)
if context.mode == "incremental":
k, v, memory_length = context.get_constant_state()
else:
k = self.compute_k(context, m)
v = self.compute_v(context, m)
memory_length, = [d for d in m.shape.dims if d.name == "memory_length"]
if context.mode == "first_part":
context.record_constant_state((k, v, memory_length))
bias = enc_dec_attention_bias(self,
context,
self.softmax_heads_dims)
return self.attention_internal(context, x, m, q, k, v, memory_length, bias)
@gin.configurable
class GeneralBilinearSelfAttention(SelfAttention):
"""General Bilinear Self-Attention.
Described in the forthcoming talking-heads paper.
  Equivalent to multi-head attention where d_kv == d_model.
It is redundant to have projections on both q and k.
It is redundant to have projections on both v and output.
We therefore omit the projections on k and v, making the two identical.
"""
def __init__(self, # pylint: disable=super-init-not-called
heads_dims=(("heads", 12),),
dropout_rate=0.0,
relative_attention_type=None,
relative_attention_num_buckets=32):
"""Create a GeneralBilinearSelfAttention Layer.
Args:
heads_dims: a list of mtf.Dimension or (name, size) pairs
dropout_rate: a float
relative_attention_type: an optional string - one of
(None, "bias", "bias_shared", "contextual")
relative_attention_num_buckets: an integer
"""
self.heads_dims = [
mtf.convert_to_dimension(d) for d in heads_dims]
self.dropout_rate = dropout_rate
self.relative_attention_type = relative_attention_type
self.relative_attention_num_buckets = relative_attention_num_buckets
def compute_q(self, context, x):
# Scale the initializer variance by 1.0/d_k
# This scales the initializer by rsqrt(d_k)
init_scale = 1.0
if not mtf.layers.unit_scaling_convention():
init_scale /= context.model.model_dim.size
return mtf.layers.dense(
x, reduced_dims=[context.model.model_dim],
new_dims=self.heads_dims + [context.model.model_dim],
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="q", expert_dims=context.model.ensemble_dims,
kernel_initializer=mtf.layers.VarianceScalingInitializer(init_scale))
def compute_y(self, context, u):
return mtf.layers.dense(
u, reduced_dims=self.heads_dims + [context.model.model_dim],
new_dims=[context.model.model_dim],
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="y", expert_dims=context.model.ensemble_dims)
def call(self, context, x, losses=None):
"""Call the layer."""
memory_length = self.memory_length(context)
q = self.compute_q(context, x)
if context.mode == "incremental":
m = x
else:
m = mtf.replace_dimensions(x, context.length_dim, memory_length)
if context.mode == "incremental":
one_hot = mtf.one_hot(
context.position, memory_length, dtype=context.activation_dtype)
inv_one_hot = 1.0 - one_hot
old_m, = context.get_states(1)
m = old_m * inv_one_hot + one_hot * m
memory_position = mtf.range(context.mesh, memory_length, tf.int32)
else:
memory_position = self.rename_length_to_memory_length(
context.position, context)
if context.mode == "incremental" or context.mode == "first_part":
context.record_new_states([m])
bias = self.compute_bias(context, memory_position, x, self.heads_dims, q)
return self.attention_internal(context, q, m, memory_length, bias)
def attention_internal(self, context, q, m, memory_length, bias):
logits = mtf.layers.us_einsum(
[q, m], reduced_dims=[context.model.model_dim])
if bias is not None:
logits += bias
weights = mtf.softmax(logits, memory_length)
# TODO(noam): make dropout_broadcast_dims configurable
dropout_broadcast_dims = [context.length_dim]
weights = mtf.dropout(
weights, context.train,
rate=self.dropout_rate,
noise_shape=weights.shape - dropout_broadcast_dims)
u = mtf.einsum([weights, m], reduced_dims=[memory_length])
return self.compute_y(context, u)
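# A plain-NumPy sketch (an illustrative assumption, not part of Mesh TensorFlow)
# of general bilinear attention for a single example: only the query and the
# output are projected, while the memory antecedent m serves directly as both
# keys and values.
def _general_bilinear_numpy_sketch(x, m, w_q, w_y):
  """x: [query_length, d_model], m: [memory_length, d_model],
  w_q: [d_model, heads, d_model], w_y: [heads, d_model, d_model]."""
  import numpy as np
  q = np.einsum("qd,dhe->qhe", x, w_q)            # per-head query projection
  logits = np.einsum("qhe,me->qhm", q, m)         # bilinear scores against m
  weights = np.exp(logits - logits.max(axis=-1, keepdims=True))
  weights = weights / weights.sum(axis=-1, keepdims=True)  # softmax over memory
  u = np.einsum("qhm,me->qhe", weights, m)        # values are m itself
  return np.einsum("qhe,hed->qd", u, w_y)         # combine heads back to d_model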
@gin.configurable
class GeneralBilinearEncDecAttention(GeneralBilinearSelfAttention):
"""Talking-heads attention over encoder output.
  See comments on GeneralBilinearSelfAttention.
"""
def __init__(self, relative_attention_type=None, **kwargs):
super(GeneralBilinearEncDecAttention, self).__init__(
relative_attention_type=relative_attention_type, **kwargs)
def _get_memory_antecedent(self, context):
return context.encoder_output
def call(self, context, x, losses=None):
"""Call the layer."""
memory_antecedent = self._get_memory_antecedent(context)
memory_input_dim = memory_antecedent.shape[-1]
if memory_input_dim != context.model.model_dim:
raise NotImplementedError(
"TODO(noam): support different model_dim in encoder and decoder.")
q = self.compute_q(context, x)
if context.mode == "incremental":
m, memory_length = context.get_constant_state()
else:
m = memory_antecedent
memory_length, = [d for d in m.shape.dims if d.name == "memory_length"]
if context.mode == "first_part":
context.record_constant_state((m, memory_length))
bias = enc_dec_attention_bias(self,
context,
self.heads_dims)
return self.attention_internal(context, q, m, memory_length, bias)
@gin.configurable
class BranchedSelfAttention(SelfAttention):
"""Branched self attention."""
def __init__(self, **kwargs):
super(BranchedSelfAttention, self).__init__(
combine_dims=False, keep_query_heads_dims=True, **kwargs)
if self.num_memory_heads != 0:
raise ValueError("Set num_memory_heads to 0 for branched attention.")
self.dense_layer = DenseReluDense()
self.kappa_init = tf.random_uniform_initializer(minval=0.0, maxval=1.0)
self.alpha_init = tf.random_uniform_initializer(minval=0.0, maxval=1.0)
def _constraint(self, z):
"""Keep z non-negative and summing to 1."""
z = mtf.relu(z)
return z / mtf.reduce_sum(z + 10**-4)
def layer_output_from_attention_output(self, context, attention_output,
losses):
heads_dim = mtf.Dimension("heads", self.num_heads)
kappa = mtf.get_variable(
context.mesh,
"kappa",
mtf.Shape([heads_dim]),
initializer=self.kappa_init,
dtype=context.variable_dtype,
constraint=self._constraint)
alpha = mtf.get_variable(
context.mesh,
"alpha",
mtf.Shape([heads_dim]),
initializer=self.alpha_init,
dtype=context.variable_dtype,
constraint=self._constraint)
o = mtf.einsum([attention_output, kappa],
output_shape=attention_output.shape)
o = self.dense_layer.call(context, o, losses)
o = mtf.einsum([o, alpha], output_shape=o.shape)
o = mtf.reduce_sum(o, reduced_dim=heads_dim)
return o
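# Conceptual sketch (assumed numbers): with num_heads=2 and kappa/alpha each
# constrained to be non-negative and sum to ~1 over the heads dim, e.g.
# kappa ~= [0.3, 0.7], each head's attention output is scaled by its kappa,
# passed through the shared DenseReluDense branch, rescaled by alpha, and the
# heads dim is then summed out to produce the layer output.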
@gin.configurable
class BranchedEncDecAttention(BranchedSelfAttention):
"""Branched attention over encoder output."""
def __init__(self, relative_attention_type=None, **kwargs):
super(BranchedEncDecAttention, self).__init__(
relative_attention_type=relative_attention_type, **kwargs)
def _get_memory_antecedent(self, context):
return context.encoder_output
def call(self, context, x, losses=None):
"""Call the layer."""
return enc_dec_attention(self, self._get_memory_antecedent(context),
context, x, losses)
class Conv1D(transformer.TransformerLayer):
"""Parent class for convolutional layers for common decoding logics.
When convolutional layers are used in the decoder, the incremental decoding
requires common features such as storing and accessing the recurrent state
information. These features do not depend on the specifics of the
convolutional layer (e.g., depthwise convolution, lightweight) as long as they
have the fixed receptive field defined by the filter size. This class
provides the methods for such features.
"""
def record_states_first_part_mode(self,
context,
x,
filter_size,
length_dim_name="length"):
"""Record the states during the first part mode.
l: current layer index
k: convolution filter size
x(l): input tensor to layer `l` for the first_part mode with the shape
[<batch_dims>, length, d_model].
The first_part mode is called once before the incremental mode is called for
the actual decoding process. The purpose is to set the recurrent states in
context.states, which are accessed during the incremental mode via
context.get_states. There are two cases, depending on whether partial
sequences are present.
1) with partial sequences
When partial sequences are present, we decode from the position after the
partial sequence, but we need to use the information contained in the
partial sequence.
x(l) = [x1, x2, 0, 0, 0]
context.initial_position = 2 (the actual decoding should start from index
2).
Then we record the state = [0, x1, x2]. If partial sequences are shorter
than the filter size, we zero pad from the left.
2) Without partial sequences
x(l) = [0, 0, 0, 0, 0]
context.initial_position = 0
Then we record the state = [0, 0, 0]
These two cases can be handled with the following pseudocode. Let
i = context.initial_position.
state = x[:, i-filter_size:i, :] and store this as state.
Equivalently we can shift x by filter_size and slice
shifted_x = shift(x, length_dim)
state = shifted_x[:, i:i + filter_size, :]
Args:
context: a transformer.Context.
x: a Tensor.
filter_size: an integer - convolution filter size.
length_dim_name: a string - a dimension name for the length mtf.Dimension.
"""
length_dim = x.shape.dims[-2]
# Slice shifted_x[:, i:i + self.filter_size, :]
filter_dim = mtf.Dimension(length_dim_name, filter_size)
indices = mtf.range(x.mesh, filter_dim, dtype=tf.int32)
indices = context.initial_position + indices
# Assumes that x.shape = [<batch_dims>, length_dim, model_dim]
output_shape = mtf.Shape(x.shape.dims[:-2] + [filter_dim] +
x.shape.dims[-1:])
shifted_x = mtf.shift(x, filter_size, length_dim, wrap=False)
state = mtf.gather(
shifted_x, indices, length_dim, output_shape=output_shape)
context.record_new_states([state])
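# Worked example (assumed toy values, not part of the original module): with
# filter_size=3, x = [x1, x2, 0, 0, 0] along length and
# context.initial_position = 2,
#   shifted_x = shift(x, 3)            -> [0, 0, 0, x1, x2]
#   indices   = 2 + [0, 1, 2]          -> [2, 3, 4]
#   state     = gather(shifted_x, ...) -> [0, x1, x2]
# which is exactly the zero-padded partial-sequence window described above.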
def record_states_incremental_mode(self, context, x, filter_size,
length_dim_name="length"):
"""Record the states during the first part mode.
l: current layer index
t: current decoding time step
k: convolution filter size
x(l, t): input vector to layer `l` at time step `t` for the incremental
mode with the shape [<batch_dims>, d_model].
During the incremental mode, the input to the conv layer x(l, t) does not
have the length dim because the input vector x corresponds to the current
decoding time step. We want to restore the input to the current layer in the
previous time steps (stored in the context.states) and combine with the
input at the current time step. This method does the following.
1) Restore the states: [x(l, t-k), ..., x(l, t-1)]
2) Combine with the current input: [x(l, t-k+1), ..., x(l, t-1), x(l, t)]
3) Store the new state and return it to be used as an input to the conv
layer.
It is important to note that the state being recorded is not used by the
next layer; it is used by the same layer at future time steps.
Args:
context: a transformer.Context.
x: a Tensor.
filter_size: an integer - convolution filter size.
length_dim_name: a string - a dimension name for the length mtf.Dimension.
Returns:
x: a Tensor of shape [<batch_dims>, filter_size, d_model].
"""
# Augment x with the states
filter_dim = mtf.Dimension(length_dim_name, filter_size)
input_state = context.get_states(1)[0]
position = mtf.constant(
x.mesh,
filter_size - 1, # Always use the last position.
shape=mtf.Shape(x.shape.dims[:-1]), # Pick out batch dims.
dtype=tf.int32)
# [batch, d_model] -> [batch, filter, d_model]
x = self.update_state(
input_state, x, position, filter_dim, dtype=context.activation_dtype)
# The new state holds the inputs for time steps [t - filter_size + 1, ..., t].
context.record_new_states([x])
return x
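# Conceptual sketch (assumptions noted): at decoding step t with
# filter_size=k, the stored state holds the layer inputs for the previous k
# steps; after update_state the returned tensor is the window
# [x(l, t-k+1), ..., x(l, t)] of shape [<batch_dims>, k, d_model], so a conv
# with VALID padding over it yields the single output for step t.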
def update_state(self, old_state, x, position, filter_dim, dtype):
"""Augment the current input to the old state.
[x(l, t-k), ..., x(l, t-1)], x(l, t) ->
[x(l, t-k+1), ..., x(l, t-1), x(l, t)]
Args:
old_state: a Tensor of shape [<batch_dims>, filter_size, d_model]
x: a Tensor of shape [<batch_dims>, d_model]
position: a Tensor of shape [<batch_dims>]
filter_dim: an mtf.Dimension corresponding to the filter size.
dtype: a mtf.VariableDType
Returns:
new_state: a Tensor of shape [<batch_dims>, filter_size, d_model].
"""
# [<batch_dims>, length, d_model]
shifted_state = mtf.shift(old_state, -1, filter_dim, wrap=False)
# [<batch_dims>, length]
one_hot = mtf.one_hot(position, filter_dim, dtype=dtype)
# [<batch_dims>, length, d_model]
shifted_x = one_hot * x
new_state = shifted_state + shifted_x
return new_state
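# Worked example (assumed toy values): with filter_size=3,
# old_state = [a, b, c], x = d and position = 2 (the last slot),
#   shifted_state = shift(old_state, -1) -> [b, c, 0]
#   one_hot(2)                           -> [0, 0, 1]
#   new_state = [b, c, 0] + [0, 0, d]    -> [b, c, d]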
@gin.configurable
class Conv1DLayer(Conv1D):
"""1D convolution over sequence length with model dim as channels.
One caveat is that this layer does nothing to stop information from bleeding
across packed examples.
"""
def __init__(self, filter_size, output_size, activation="linear"): # pylint: disable=super-init-not-called
"""Create a Conv1DLayer.
Args:
filter_size: a positive integer, the size of convolutional kernel.
output_size: a positive integer, the number of channels in the output.
activation: an optional string function name from namespace mtf, a
function to be applied to the layer output. If not provided or set to
"linear", then no function will be applied.
"""
self._filter_size = filter_size
self._output_size = output_size
self._activation = activation
def call(self, context, x, losses=None):
"""Call the layer."""
if context.mode == "first_part":
self.record_states_first_part_mode(context, x, self.filter_size)
if context.mode == "incremental":
x = self.record_states_incremental_mode(context, x, self.filter_size)
padding = "VALID"
else:
# The first_part mode also needs masking because it may have partial
# sequences.
mask = mtf.cast(
mtf.not_equal(context.inputs, 0), context.activation_dtype)
x *= mask
padding = "SAME"
model_dim = x.shape.dims[-1]
input_dim = mtf.Dimension("input_dim", model_dim.size)
x = mtf.replace_dimensions(x, model_dim, input_dim)
output_dim = mtf.Dimension(model_dim.name, self._output_size)
output = mtf.layers.conv1d(
x,
output_dim=output_dim,
filter_size=self._filter_size,
padding=padding,
filter_initializer=tf.glorot_uniform_initializer())
if context.mode == "incremental":
filter_dim = mtf.Dimension("length", self.filter_size)
# [batch_dims, 1, output_dim] -> [batch_dims, output_dim]
output = mtf.reduce_sum(
output, reduced_dim=mtf.Dimension(filter_dim.name, 1))
if self._activation != "linear":
activation_fn = getattr(mtf, self._activation)
output = activation_fn(output)
return output
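# Shape sketch for incremental decoding (illustrative): the recorded window x
# has shape [<batch_dims>, filter_size, d_model]; conv1d with VALID padding
# then produces [<batch_dims>, 1, output_dim], and the reduce_sum over the
# length-1 dim squeezes this to [<batch_dims>, output_dim] for the current
# decoding position.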
@property
def filter_size(self):
return self._filter_size
@gin.configurable
class SeparableConv1DLayer(Conv1D):
"""1D separable convolution over sequence length with model dim as channels.
One caveat is that this layer does nothing to stop information from bleeding
across packed examples.
"""
def __init__(self, # pylint: disable=super-init-not-called
min_relative_pos,
max_relative_pos,
output_size,
depthwise_filter_initializer_scale=1.0,
pointwise_filter_initializer_scale=1.0,
activation="linear"):
"""Create a SeparableConv1DLayer.
The filter size will be `max_relative_pos - min_relative_pos + 1`.
Args:
min_relative_pos: an integer, the inclusive minimum relative position of
the depthwise filter, where a relative position of zero means the left
end of the filter aligns with the left end of the input.
max_relative_pos: an integer, the inclusive maximum relative position of
the depthwise filter, where a relative position of zero means the right
end of the filter aligns with the right end of the input.
output_size: a positive integer, the number of channels in the output.
depthwise_filter_initializer_scale: a positive integer, the scale for the
initializer for the depthwise filter.
pointwise_filter_initializer_scale: a positive integer, the scale for the
initializer for the pointwise filter.
activation: an optional string function name from namespace mtf, a
function to be applied to the layer output. If not provided or set to
"linear", then no function will be applied.
"""
self._min_relative_pos = min_relative_pos
self._max_relative_pos = max_relative_pos
self._output_size = output_size
self._depthwise_filter_initializer_scale = depthwise_filter_initializer_scale
self._pointwise_filter_initializer_scale = pointwise_filter_initializer_scale
self._activation = activation
def call(self, context, x, losses=None, all_kernel_wts=None):
"""Call the layer."""
if context.mode == "first_part":
self.record_states_first_part_mode(context, x, self.filter_size)
if context.mode == "incremental":
x = self.record_states_incremental_mode(context, x, self.filter_size)
else:
# Mask padding.
# TODO(karishmamalkan): Change the inputs_for_mask_creation to use decoder
# when using with decoder
inputs_for_mask_creation = context.inputs
mask = mtf.cast(
mtf.not_equal(inputs_for_mask_creation, 0), context.activation_dtype)
x *= mask
model_dim = x.shape.dims[-1]
output_dim = mtf.Dimension(model_dim.name, self._output_size)
output = mtf.layers.separable_conv1d(
x,
output_dim=output_dim,
min_relative_pos=self._min_relative_pos,
max_relative_pos=self._max_relative_pos,
depthwise_filter_initializer_scale=self
._depthwise_filter_initializer_scale,
pointwise_filter_initializer_scale=self
._pointwise_filter_initializer_scale,
use_bias=True,
kernel_depth_weights=all_kernel_wts)
if context.mode == "incremental":
filter_dim = mtf.Dimension("length", self.filter_size)
# Drop unnecessary portion [batch, length, d_model] -> [batch, d_model]
# Only the last sequence position is relevant.
output = mtf.gather(output, [self.filter_size - 1], filter_dim)
if self._activation != "linear":
activation_fn = getattr(mtf, self._activation)
output = activation_fn(output)
return output
@property
def filter_size(self):
return self._max_relative_pos - self._min_relative_pos + 1
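# Example (assuming the relative positions are interpreted as offsets from the
# output position): min_relative_pos=-3, max_relative_pos=0 gives
# filter_size = 0 - (-3) + 1 = 4, i.e. a window covering the current position
# and the three preceding ones.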
@gin.configurable
class Conv1DLocalAttn(SeparableConv1DLayer):
"""Lightweight 1D separable convolution over sequence length with d_model as channels.
Lightweight 1D separable convolution over sequence length, with separated over
model_dim as channels, containing a fixed number of unique channels
repeated/stacked over the model_dim.
"""
def __init__(self,
min_relative_pos,
max_relative_pos,
output_size,
depthwise_filter_initializer_scale=1.0,
pointwise_filter_initializer_scale=1.0,
activation="linear",
num_unique_depth_filters=1):
"""Create a LightweightConv1DLayer.
The filter size will be `max_relative_pos - min_relative_pos + 1`.
The filter is depthwise separable, and its values are tied so that they repeat
every `num_unique_depth_filters` elements along the depth (model_dim).
Args:
min_relative_pos: an integer, the inclusive minimum relative position of
the depthwise filter, where a relative position of zero means the left
end of the filter aligns with the left end of the input.
max_relative_pos: an integer, the inclusive maximum relative position of
the depthwise filter, where a relative position of zero means the right
end of the filter aligns with the right end of the input.
output_size: a positive integer, the number of channels in the output.
depthwise_filter_initializer_scale: a positive integer, the scale for the
initializer for the depthwise filter.
pointwise_filter_initializer_scale: a positive integer, the scale for the
initializer for the pointwise filter.
activation: an optional string function name from namespace mtf, a
function to be applied to the layer output. If not provided or set to
"linear", then no function will be applied.
num_unique_depth_filters: The number of unique depth filter values. The
unique filter is repeated along the depth dim every
num_unique_depth_filters elements.
"""
super(Conv1DLocalAttn,
self).__init__(min_relative_pos, max_relative_pos, output_size,
depthwise_filter_initializer_scale,
pointwise_filter_initializer_scale, activation)
self._num_unique_depth_filters = num_unique_depth_filters
assert (self._output_size % self._num_unique_depth_filters == 0), (
"The number of elements in the unique depth filter should exactly "
"divide the number of output channels. You set "
"num_unique_depth_filters=%d, output_size(num_output_channels)=%d") % (
self._num_unique_depth_filters, self._output_size)
def call(self, context, x, losses=None):
"""Call the layer."""
depth_dim = x.shape.dims[-1]
initializer_scale = self._depthwise_filter_initializer_scale or 1.0
kernel_size = self._max_relative_pos - self._min_relative_pos + 1
kernel_initializer = mtf.layers.VarianceScalingInitializer(
scale=initializer_scale / kernel_size)
repeated_kernel_dim = [
mtf.Dimension(
"repeated_kernel_dim",
size=int(depth_dim.size / self._num_unique_depth_filters))
]
all_kernel_wts = []
for i in range(kernel_size):
# Get a kernel variable of size num_unique_depth_filters and replicate it
# to span the complete depth (d_model) of x.
kernel_wt = self.get_kernel_wt(
x,
repeated_kernel_dim=repeated_kernel_dim,
kernel_initializer=kernel_initializer,
i=i,
context=context,
variable_dtype=context.variable_dtype,
master_dtype=tf.float32,
slice_dtype=tf.float32)
repeated_kernel_wts = [kernel_wt] * self._num_unique_depth_filters
repeated_kernel_wts_concat = mtf.concat(
repeated_kernel_wts, concat_dim_name="repeated_kernel_dim")
repeated_kernel_wts_concat = mtf.rename_dimension(
repeated_kernel_wts_concat, "repeated_kernel_dim", "d_model")
all_kernel_wts.append(repeated_kernel_wts_concat)
# modify the kernel weights, such that they are softmaxed over the width of
# the kernel. We do this by stacking the individual kernel positions,
# performing the softmax, and then re-separating the stack.
stacked_kernel_weights = mtf.stack(all_kernel_wts, "new_stack_dim")
softmaxed_kernel_weights = mtf.softmax(
stacked_kernel_weights, reduced_dim=stacked_kernel_weights.shape[0]
) # the softmax is calculated over the new_stack_dim we created
unstacked_kernel_weights = mtf.unstack(softmaxed_kernel_weights,
stacked_kernel_weights.shape[0])
return super(Conv1DLocalAttn, self).call(context, x, losses,
unstacked_kernel_weights)
@gin.configurable
class LightweightConv1DLocalAttn(Conv1DLocalAttn):
"""Lightweight 1D separable convolution over seq_len with d_model as channels.
Lightweight 1D separable convolution over sequence length, separable over
model_dim as channels, containing a fixed number of unique channels
repeated/stacked over the model_dim.
"""
def get_kernel_wt(self,
x,
repeated_kernel_dim,
kernel_initializer,
i,
context,
variable_dtype,
master_dtype=tf.float32,
slice_dtype=tf.float32):
kernel_wt = mtf.layers.get_dense_kernel_weights(
x,
new_dims=[],
reduced_dims=[],
expert_dims=repeated_kernel_dim,
kernel_initializer=kernel_initializer,
name="lightwt_depthwise_dense_%d" % (i),
variable_dtype=context.variable_dtype,
master_dtype=tf.float32,
slice_dtype=tf.float32)
return kernel_wt
@gin.configurable
class DynamicConv1DLocalAttn(Conv1DLocalAttn):
"""Dynamic 1D separable convolution over seq_len with d_model as channels.
Dynamic kernels are predicted from the input at each position of the sequence.
The conv operation is separable over model_dim as channels, containing a fixed
number of unique channels repeated/stacked over the model_dim.
"""
def get_kernel_wt(self,
x,
repeated_kernel_dim,
kernel_initializer,
i,
context,
variable_dtype,
master_dtype=tf.float32,
slice_dtype=tf.float32):
kernel_wt = mtf.layers.dense(
x,
new_dims=repeated_kernel_dim,
reduced_dims=[context.model.model_dim],
expert_dims=[],
kernel_initializer=kernel_initializer,
name="dyn_conv_depthwise_dense_%d" % (i),
variable_dtype=context.variable_dtype,
master_dtype=tf.float32,
slice_dtype=tf.float32)
return kernel_wt
@gin.configurable
class LocalConvAttnBlock(transformer.TransformerLayer):
"""Conv Attention Block for Lightweight and dynamic conv attention.
Lightweight/Dynamic separable convolution over sequence length as described in
https://arxiv.org/pdf/1901.10430.pdf.
"""
def __init__(self,
min_relative_pos,
max_relative_pos,
output_size,
depthwise_filter_initializer_scale=1.0,
pointwise_filter_initializer_scale=1.0,
activation="linear",
num_unique_depth_filters=1,
attention_type="lightweight_conv"):
"""Create a LightweightConv1DAttnBlock.
The filter size will be `max_relative_pos - min_relative_pos + 1`.
The filter is depthwise separable, and its values are tied so that they repeat
every `num_unique_depth_filters` elements along the depth (model_dim).
Args:
min_relative_pos: an integer, the inclusive minimum relative position of
the depthwise filter, where a relative position of zero means the left
end of the filter aligns with the left end of the input.
max_relative_pos: an integer, the inclusive maximum relative position of
the depthwise filter, where a relative position of zero means the right
end of the filter aligns with the right end of the input.
output_size: a positive integer, the number of channels in the output.
depthwise_filter_initializer_scale: a positive integer, the scale for the
initializer for the depthwise filter.
pointwise_filter_initializer_scale: a positive integer, the scale for the
initializer for the pointwise filter.
activation: an optional string function name from namespace mtf, a
function to be applied to the layer output. If not provided or set to
"linear", then no function will be applied.
num_unique_depth_filters: The number of unique depth filter values. The
unique filter is repeated along the depth dim every
num_unique_depth_filters elements.
attention_type: a string, the type of conv attention - either
"lightweight_conv" or "dynamic_conv".
"""
if attention_type == "lightweight_conv":
self.conv_local_attn_layer = LightweightConv1DLocalAttn(
min_relative_pos, max_relative_pos, output_size,
depthwise_filter_initializer_scale,
pointwise_filter_initializer_scale, activation)
elif attention_type == "dynamic_conv":
self.conv_local_attn_layer = DynamicConv1DLocalAttn(
min_relative_pos, max_relative_pos, output_size,
depthwise_filter_initializer_scale,
pointwise_filter_initializer_scale, activation)
else:
raise NotImplementedError("This attention type not implemented")
def call(self, context, x, losses=None):
"""Call the layer."""
gated_ip = mtf.layers.dense_product(
x,
reduced_dims=[context.model.model_dim],
new_dims=[context.model.model_dim],
activation_functions=["linear", "sigmoid"],
variable_dtype=context.variable_dtype,
name="local_conv_inp")
attn_output = self.conv_local_attn_layer.call(context, gated_ip, losses)
op_projection = mtf.layers.dense(
attn_output,
reduced_dims=[context.model.model_dim],
new_dims=[context.model.model_dim],
activation=None,
variable_dtype=context.variable_dtype,
name="local_conv_attn_op_projection")
return op_projection
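# Block sketch (matching the code above, per the lightweight/dynamic conv
# paper): the input is first passed through a gated linear unit
# (elementwise linear(x) * sigmoid(x) via dense_product), then through the
# lightweight or dynamic conv local-attention layer, and finally through an
# output projection back to model_dim.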
@gin.configurable
class ParallelLayer(transformer.TransformerLayer):
"""Multiple layers in parallel.
Outputs are summed and divided by sqrt(n).
"""
def __init__(self,
layer_classes=(DenseReluDense, SelfAttention),
use_scope=True):
"""Create a ParallelLayer.
Args:
layer_classes: a list of TransformerLayer classes
use_scope: a boolean, default True, indicating whether to use unique
variable name scopes for each parallel layer. Kept for backward compatibility.
"""
self.layer_classes = [l() for l in layer_classes]
self.use_scope = use_scope
def call(self, context, x, losses=None):
"""Call the layer."""
layer_outputs = []
if self.use_scope:
# Provide unique variable name scopes to avoid overwriting.
for i, l in enumerate(self.layer_classes):
with tf.variable_scope("parallel_layer_%d" % i):
layer_output = l.call(context, x, losses=losses)
layer_outputs.append(layer_output)
else:
layer_outputs = [
l.call(context, x, losses=losses) for l in self.layer_classes
]
return mtf.add_n(layer_outputs) * (len(self.layer_classes)**-0.5)
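# Example (assumed default configuration): with
# layer_classes=(DenseReluDense, SelfAttention), the layer output is
# (ffn_output + attention_output) / sqrt(2), since add_n sums the branch
# outputs and the result is scaled by len(layer_classes) ** -0.5.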
|
tensorflow/mesh
|
mesh_tensorflow/transformer/transformer_layers.py
|
Python
|
apache-2.0
| 91,110
|
[
"MOE"
] |
9f2463fd3aaead0997ab741624977a91a6a9f27cb1b324ee32a061f7c36b4135
|